2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
47 #include <net/checksum.h>
50 #include <asm/system.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 #define DRV_MODULE_NAME "tg3"
67 #define TG3_MIN_NUM 117
68 #define DRV_MODULE_VERSION \
69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70 #define DRV_MODULE_RELDATE "January 25, 2011"
72 #define TG3_DEF_MAC_MODE 0
73 #define TG3_DEF_RX_MODE 0
74 #define TG3_DEF_TX_MODE 0
75 #define TG3_DEF_MSG_ENABLE \
85 /* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
88 #define TG3_TX_TIMEOUT (5 * HZ)
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU 60
92 #define TG3_MAX_MTU(tp) \
93 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
99 #define TG3_RX_STD_RING_SIZE(tp) \
100 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
101 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JMB_RING_SIZE(tp) \
104 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
105 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
106 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
107 #define TG3_RSS_INDIR_TBL_SIZE 128
109 /* Do not place this n-ring entries value into the tp struct itself,
110 * we really want to expose these constants to GCC so that modulo et
111 * al. operations are done with shifts and masks instead of with
112 * hw multiply/modulo instructions. Another solution would be to
113 * replace things like '% foo' with '& (foo - 1)'.
116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
119 #define TG3_RX_STD_RING_BYTES(tp) \
120 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
121 #define TG3_RX_JMB_RING_BYTES(tp) \
122 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
123 #define TG3_RX_RCB_RING_BYTES(tp) \
124 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
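/* Illustrative note (not in the original source): NEXT_TX() is the
 * shift-and-mask form promised by the comment above.  Because
 * TG3_TX_RING_SIZE is a power of two (512), the following two forms are
 * equivalent, but the second needs no hardware divide:
 *
 *	next = ((N) + 1) % TG3_TX_RING_SIZE;
 *	next = ((N) + 1) & (TG3_TX_RING_SIZE - 1);
 */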
129 #define TG3_DMA_BYTE_ENAB 64
131 #define TG3_RX_STD_DMA_SZ 1536
132 #define TG3_RX_JMB_DMA_SZ 9046
134 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
136 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
137 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
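/* Worked example (illustrative, not in the original source): with the sizes
 * above, each standard rx buffer is mapped as
 * TG3_RX_STD_MAP_SZ = 1536 + 64 = 1600 bytes and each jumbo rx buffer as
 * TG3_RX_JMB_MAP_SZ = 9046 + 64 = 9110 bytes, i.e. the DMA payload padded
 * by TG3_DMA_BYTE_ENAB.
 */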
139 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
140 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
142 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
143 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
145 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
146 * that are at least dword aligned when used in PCIX mode. The driver
147 * works around this bug by double copying the packet. This workaround
148 * is built into the normal double copy length check for efficiency.
150 * However, the double copy is only necessary on those architectures
151 * where unaligned memory accesses are inefficient. For those architectures
152 * where unaligned memory accesses incur little penalty, we can reintegrate
153 * the 5701 in the normal rx path. Doing so saves a device structure
154 * dereference by hardcoding the double copy threshold in place.
156 #define TG3_RX_COPY_THRESHOLD 256
157 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
158 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
159 #else
160 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
161 #endif
163 /* minimum number of free TX descriptors required to wake up TX process */
164 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
166 #define TG3_RAW_IP_ALIGN 2
168 /* number of ETHTOOL_GSTATS u64's */
169 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
171 #define TG3_NUM_TEST 6
173 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
175 #define FIRMWARE_TG3 "tigon/tg3.bin"
176 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
177 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
179 static char version[] __devinitdata =
180 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
182 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
183 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
184 MODULE_LICENSE("GPL");
185 MODULE_VERSION(DRV_MODULE_VERSION);
186 MODULE_FIRMWARE(FIRMWARE_TG3);
187 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
188 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
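/* Illustrative sketch (not part of the original file): the FIRMWARE_TG3*
 * names above are consumed through the kernel firmware loader, roughly:
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, FIRMWARE_TG3, &tp->pdev->dev) == 0) {
 *		load_image(tp, fw->data, fw->size);	(hypothetical helper)
 *		release_firmware(fw);
 *	}
 */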
190 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
191 module_param(tg3_debug, int, 0);
192 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
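/* Usage note (illustrative): the bitmap can be given at load time, e.g.
 * "modprobe tg3 tg3_debug=0x7" to enable the NETIF_MSG_DRV, NETIF_MSG_PROBE
 * and NETIF_MSG_LINK classes, or left at -1 to fall back to
 * TG3_DEF_MSG_ENABLE.
 */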
194 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
268 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
269 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
270 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
271 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
272 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
273 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
274 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
278 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
280 static const struct {
281 const char string[ETH_GSTRING_LEN];
282 } ethtool_stats_keys[TG3_NUM_STATS] = {
285 { "rx_ucast_packets" },
286 { "rx_mcast_packets" },
287 { "rx_bcast_packets" },
289 { "rx_align_errors" },
290 { "rx_xon_pause_rcvd" },
291 { "rx_xoff_pause_rcvd" },
292 { "rx_mac_ctrl_rcvd" },
293 { "rx_xoff_entered" },
294 { "rx_frame_too_long_errors" },
296 { "rx_undersize_packets" },
297 { "rx_in_length_errors" },
298 { "rx_out_length_errors" },
299 { "rx_64_or_less_octet_packets" },
300 { "rx_65_to_127_octet_packets" },
301 { "rx_128_to_255_octet_packets" },
302 { "rx_256_to_511_octet_packets" },
303 { "rx_512_to_1023_octet_packets" },
304 { "rx_1024_to_1522_octet_packets" },
305 { "rx_1523_to_2047_octet_packets" },
306 { "rx_2048_to_4095_octet_packets" },
307 { "rx_4096_to_8191_octet_packets" },
308 { "rx_8192_to_9022_octet_packets" },
315 { "tx_flow_control" },
317 { "tx_single_collisions" },
318 { "tx_mult_collisions" },
320 { "tx_excessive_collisions" },
321 { "tx_late_collisions" },
322 { "tx_collide_2times" },
323 { "tx_collide_3times" },
324 { "tx_collide_4times" },
325 { "tx_collide_5times" },
326 { "tx_collide_6times" },
327 { "tx_collide_7times" },
328 { "tx_collide_8times" },
329 { "tx_collide_9times" },
330 { "tx_collide_10times" },
331 { "tx_collide_11times" },
332 { "tx_collide_12times" },
333 { "tx_collide_13times" },
334 { "tx_collide_14times" },
335 { "tx_collide_15times" },
336 { "tx_ucast_packets" },
337 { "tx_mcast_packets" },
338 { "tx_bcast_packets" },
339 { "tx_carrier_sense_errors" },
343 { "dma_writeq_full" },
344 { "dma_write_prioq_full" },
348 { "rx_threshold_hit" },
350 { "dma_readq_full" },
351 { "dma_read_prioq_full" },
352 { "tx_comp_queue_full" },
354 { "ring_set_send_prod_index" },
355 { "ring_status_update" },
357 { "nic_avoided_irqs" },
358 { "nic_tx_threshold_hit" }
361 static const struct {
362 const char string[ETH_GSTRING_LEN];
363 } ethtool_test_keys[TG3_NUM_TEST] = {
364 { "nvram test (online) " },
365 { "link test (online) " },
366 { "register test (offline)" },
367 { "memory test (offline)" },
368 { "loopback test (offline)" },
369 { "interrupt test (offline)" },
372 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
374 writel(val, tp->regs + off);
377 static u32 tg3_read32(struct tg3 *tp, u32 off)
379 return readl(tp->regs + off);
382 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
384 writel(val, tp->aperegs + off);
387 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
389 return readl(tp->aperegs + off);
392 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
396 spin_lock_irqsave(&tp->indirect_lock, flags);
397 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
398 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
399 spin_unlock_irqrestore(&tp->indirect_lock, flags);
402 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
404 writel(val, tp->regs + off);
405 readl(tp->regs + off);
408 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
413 spin_lock_irqsave(&tp->indirect_lock, flags);
414 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
415 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
416 spin_unlock_irqrestore(&tp->indirect_lock, flags);
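/* Descriptive note (illustrative, not in the original source): the
 * *_indirect_* helpers above and below avoid MMIO entirely.  They program
 * the register offset into the TG3PCI_REG_BASE_ADDR config-space window and
 * move the data through TG3PCI_REG_DATA, with tp->indirect_lock held so the
 * two config-space accesses of concurrent callers cannot interleave.
 */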
420 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
424 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
425 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
426 TG3_64BIT_REG_LOW, val);
429 if (off == TG3_RX_STD_PROD_IDX_REG) {
430 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
431 TG3_64BIT_REG_LOW, val);
435 spin_lock_irqsave(&tp->indirect_lock, flags);
436 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
437 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
438 spin_unlock_irqrestore(&tp->indirect_lock, flags);
440 /* In indirect mode when disabling interrupts, we also need
441 * to clear the interrupt bit in the GRC local ctrl register.
443 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
445 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
446 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
450 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
455 spin_lock_irqsave(&tp->indirect_lock, flags);
456 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
457 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
458 spin_unlock_irqrestore(&tp->indirect_lock, flags);
462 /* usec_wait specifies the wait time in usec when writing to certain registers
463 * where it is unsafe to read back the register without some delay.
464 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
465 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
467 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
469 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
470 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471 /* Non-posted methods */
472 tp->write32(tp, off, val);
475 tg3_write32(tp, off, val);
480 /* Wait again after the read for the posted method to guarantee that
481 * the wait time is met.
487 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
489 tp->write32_mbox(tp, off, val);
490 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
491 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
492 tp->read32_mbox(tp, off);
495 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
497 void __iomem *mbox = tp->regs + off;
499 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
501 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
505 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
507 return readl(tp->regs + off + GRCMBOX_BASE);
510 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
512 writel(val, tp->regs + off + GRCMBOX_BASE);
515 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
516 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
517 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
518 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
519 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
521 #define tw32(reg, val) tp->write32(tp, reg, val)
522 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
523 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
524 #define tr32(reg) tp->read32(tp, reg)
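/* Illustrative usage (not in the original source): the accessors above
 * assume a local "struct tg3 *tp" in scope, e.g.
 *
 *	u32 mode = tr32(MAC_MODE);
 *	tw32_f(MAC_MODE, mode | MAC_MODE_PORT_MODE_GMII);
 *
 * where tw32_f() also flushes the write, typically by reading the register
 * back.
 */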
526 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
530 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
531 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
534 spin_lock_irqsave(&tp->indirect_lock, flags);
535 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
536 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
537 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
539 /* Always leave this as zero. */
540 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
542 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
543 tw32_f(TG3PCI_MEM_WIN_DATA, val);
545 /* Always leave this as zero. */
546 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
548 spin_unlock_irqrestore(&tp->indirect_lock, flags);
551 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
555 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
556 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
561 spin_lock_irqsave(&tp->indirect_lock, flags);
562 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
563 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
564 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
566 /* Always leave this as zero. */
567 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
569 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
570 *val = tr32(TG3PCI_MEM_WIN_DATA);
572 /* Always leave this as zero. */
573 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
575 spin_unlock_irqrestore(&tp->indirect_lock, flags);
578 static void tg3_ape_lock_init(struct tg3 *tp)
583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
584 regbase = TG3_APE_LOCK_GRANT;
586 regbase = TG3_APE_PER_LOCK_GRANT;
588 /* Make sure the driver doesn't hold any stale locks. */
589 for (i = 0; i < 8; i++)
590 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
593 static int tg3_ape_lock(struct tg3 *tp, int locknum)
597 u32 status, req, gnt;
599 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
603 case TG3_APE_LOCK_GRC:
604 case TG3_APE_LOCK_MEM:
610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
611 req = TG3_APE_LOCK_REQ;
612 gnt = TG3_APE_LOCK_GRANT;
614 req = TG3_APE_PER_LOCK_REQ;
615 gnt = TG3_APE_PER_LOCK_GRANT;
620 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
622 /* Wait for up to 1 millisecond to acquire lock. */
623 for (i = 0; i < 100; i++) {
624 status = tg3_ape_read32(tp, gnt + off);
625 if (status == APE_LOCK_GRANT_DRIVER)
630 if (status != APE_LOCK_GRANT_DRIVER) {
631 /* Revoke the lock request. */
632 tg3_ape_write32(tp, gnt + off,
633 APE_LOCK_GRANT_DRIVER);
641 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
645 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
649 case TG3_APE_LOCK_GRC:
650 case TG3_APE_LOCK_MEM:
656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
657 gnt = TG3_APE_LOCK_GRANT;
659 gnt = TG3_APE_PER_LOCK_GRANT;
661 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
664 static void tg3_disable_ints(struct tg3 *tp)
668 tw32(TG3PCI_MISC_HOST_CTRL,
669 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
670 for (i = 0; i < tp->irq_max; i++)
671 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
674 static void tg3_enable_ints(struct tg3 *tp)
681 tw32(TG3PCI_MISC_HOST_CTRL,
682 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
684 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
685 for (i = 0; i < tp->irq_cnt; i++) {
686 struct tg3_napi *tnapi = &tp->napi[i];
688 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
689 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
690 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
692 tp->coal_now |= tnapi->coal_now;
695 /* Force an initial interrupt */
696 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
697 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
698 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
700 tw32(HOSTCC_MODE, tp->coal_now);
702 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
705 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
707 struct tg3 *tp = tnapi->tp;
708 struct tg3_hw_status *sblk = tnapi->hw_status;
709 unsigned int work_exists = 0;
711 /* check for phy events */
712 if (!(tp->tg3_flags &
713 (TG3_FLAG_USE_LINKCHG_REG |
714 TG3_FLAG_POLL_SERDES))) {
715 if (sblk->status & SD_STATUS_LINK_CHG)
718 /* check for RX/TX work to do */
719 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
720 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
727 * similar to tg3_enable_ints, but it accurately determines whether there
728 * is new work pending and can return without flushing the PIO write
729 * which reenables interrupts
731 static void tg3_int_reenable(struct tg3_napi *tnapi)
733 struct tg3 *tp = tnapi->tp;
735 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
738 /* When doing tagged status, this work check is unnecessary.
739 * The last_tag we write above tells the chip which piece of
740 * work we've completed.
742 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
744 tw32(HOSTCC_MODE, tp->coalesce_mode |
745 HOSTCC_MODE_ENABLE | tnapi->coal_now);
748 static void tg3_switch_clocks(struct tg3 *tp)
753 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
754 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
757 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
759 orig_clock_ctrl = clock_ctrl;
760 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
761 CLOCK_CTRL_CLKRUN_OENABLE |
763 tp->pci_clock_ctrl = clock_ctrl;
765 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
766 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
767 tw32_wait_f(TG3PCI_CLOCK_CTRL,
768 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
770 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
771 tw32_wait_f(TG3PCI_CLOCK_CTRL,
773 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
775 tw32_wait_f(TG3PCI_CLOCK_CTRL,
776 clock_ctrl | (CLOCK_CTRL_ALTCLK),
779 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
782 #define PHY_BUSY_LOOPS 5000
784 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
790 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
792 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
798 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
799 MI_COM_PHY_ADDR_MASK);
800 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
801 MI_COM_REG_ADDR_MASK);
802 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
804 tw32_f(MAC_MI_COM, frame_val);
806 loops = PHY_BUSY_LOOPS;
809 frame_val = tr32(MAC_MI_COM);
811 if ((frame_val & MI_COM_BUSY) == 0) {
813 frame_val = tr32(MAC_MI_COM);
821 *val = frame_val & MI_COM_DATA_MASK;
825 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
826 tw32_f(MAC_MI_MODE, tp->mi_mode);
833 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
839 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
840 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
843 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
845 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
849 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
850 MI_COM_PHY_ADDR_MASK);
851 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
852 MI_COM_REG_ADDR_MASK);
853 frame_val |= (val & MI_COM_DATA_MASK);
854 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
856 tw32_f(MAC_MI_COM, frame_val);
858 loops = PHY_BUSY_LOOPS;
861 frame_val = tr32(MAC_MI_COM);
862 if ((frame_val & MI_COM_BUSY) == 0) {
864 frame_val = tr32(MAC_MI_COM);
874 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
875 tw32_f(MAC_MI_MODE, tp->mi_mode);
882 static int tg3_bmcr_reset(struct tg3 *tp)
887 /* OK, reset it, and poll the BMCR_RESET bit until it
888 * clears or we time out.
890 phy_control = BMCR_RESET;
891 err = tg3_writephy(tp, MII_BMCR, phy_control);
897 err = tg3_readphy(tp, MII_BMCR, &phy_control);
901 if ((phy_control & BMCR_RESET) == 0) {
913 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
915 struct tg3 *tp = bp->priv;
918 spin_lock_bh(&tp->lock);
920 if (tg3_readphy(tp, reg, &val))
923 spin_unlock_bh(&tp->lock);
928 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
930 struct tg3 *tp = bp->priv;
933 spin_lock_bh(&tp->lock);
935 if (tg3_writephy(tp, reg, val))
938 spin_unlock_bh(&tp->lock);
943 static int tg3_mdio_reset(struct mii_bus *bp)
948 static void tg3_mdio_config_5785(struct tg3 *tp)
951 struct phy_device *phydev;
953 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
954 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
955 case PHY_ID_BCM50610:
956 case PHY_ID_BCM50610M:
957 val = MAC_PHYCFG2_50610_LED_MODES;
959 case PHY_ID_BCMAC131:
960 val = MAC_PHYCFG2_AC131_LED_MODES;
962 case PHY_ID_RTL8211C:
963 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
965 case PHY_ID_RTL8201E:
966 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
972 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
973 tw32(MAC_PHYCFG2, val);
975 val = tr32(MAC_PHYCFG1);
976 val &= ~(MAC_PHYCFG1_RGMII_INT |
977 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
978 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
979 tw32(MAC_PHYCFG1, val);
984 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
985 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
986 MAC_PHYCFG2_FMODE_MASK_MASK |
987 MAC_PHYCFG2_GMODE_MASK_MASK |
988 MAC_PHYCFG2_ACT_MASK_MASK |
989 MAC_PHYCFG2_QUAL_MASK_MASK |
990 MAC_PHYCFG2_INBAND_ENABLE;
992 tw32(MAC_PHYCFG2, val);
994 val = tr32(MAC_PHYCFG1);
995 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
996 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
997 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
998 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
999 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1000 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1001 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1003 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1004 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1005 tw32(MAC_PHYCFG1, val);
1007 val = tr32(MAC_EXT_RGMII_MODE);
1008 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1009 MAC_RGMII_MODE_RX_QUALITY |
1010 MAC_RGMII_MODE_RX_ACTIVITY |
1011 MAC_RGMII_MODE_RX_ENG_DET |
1012 MAC_RGMII_MODE_TX_ENABLE |
1013 MAC_RGMII_MODE_TX_LOWPWR |
1014 MAC_RGMII_MODE_TX_RESET);
1015 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1016 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1017 val |= MAC_RGMII_MODE_RX_INT_B |
1018 MAC_RGMII_MODE_RX_QUALITY |
1019 MAC_RGMII_MODE_RX_ACTIVITY |
1020 MAC_RGMII_MODE_RX_ENG_DET;
1021 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1022 val |= MAC_RGMII_MODE_TX_ENABLE |
1023 MAC_RGMII_MODE_TX_LOWPWR |
1024 MAC_RGMII_MODE_TX_RESET;
1026 tw32(MAC_EXT_RGMII_MODE, val);
1029 static void tg3_mdio_start(struct tg3 *tp)
1031 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1032 tw32_f(MAC_MI_MODE, tp->mi_mode);
1035 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1037 tg3_mdio_config_5785(tp);
1040 static int tg3_mdio_init(struct tg3 *tp)
1044 struct phy_device *phydev;
1046 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
1049 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1051 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1052 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1054 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1055 TG3_CPMU_PHY_STRAP_IS_SERDES;
1059 tp->phy_addr = TG3_PHY_MII_ADDR;
1063 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1064 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1067 tp->mdio_bus = mdiobus_alloc();
1068 if (tp->mdio_bus == NULL)
1071 tp->mdio_bus->name = "tg3 mdio bus";
1072 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1073 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1074 tp->mdio_bus->priv = tp;
1075 tp->mdio_bus->parent = &tp->pdev->dev;
1076 tp->mdio_bus->read = &tg3_mdio_read;
1077 tp->mdio_bus->write = &tg3_mdio_write;
1078 tp->mdio_bus->reset = &tg3_mdio_reset;
1079 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1080 tp->mdio_bus->irq = &tp->mdio_irq[0];
1082 for (i = 0; i < PHY_MAX_ADDR; i++)
1083 tp->mdio_bus->irq[i] = PHY_POLL;
1085 /* The bus registration will look for all the PHYs on the mdio bus.
1086 * Unfortunately, it does not ensure the PHY is powered up before
1087 * accessing the PHY ID registers. A chip reset is the
1088 * quickest way to bring the device back to an operational state..
1090 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1093 i = mdiobus_register(tp->mdio_bus);
1095 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1096 mdiobus_free(tp->mdio_bus);
1100 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1102 if (!phydev || !phydev->drv) {
1103 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1104 mdiobus_unregister(tp->mdio_bus);
1105 mdiobus_free(tp->mdio_bus);
1109 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1110 case PHY_ID_BCM57780:
1111 phydev->interface = PHY_INTERFACE_MODE_GMII;
1112 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 case PHY_ID_BCM50610:
1115 case PHY_ID_BCM50610M:
1116 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1117 PHY_BRCM_RX_REFCLK_UNUSED |
1118 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1119 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1120 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1121 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1122 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1123 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1124 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1125 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1127 case PHY_ID_RTL8211C:
1128 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1130 case PHY_ID_RTL8201E:
1131 case PHY_ID_BCMAC131:
1132 phydev->interface = PHY_INTERFACE_MODE_MII;
1133 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1134 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1138 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1141 tg3_mdio_config_5785(tp);
1146 static void tg3_mdio_fini(struct tg3 *tp)
1148 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1149 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1150 mdiobus_unregister(tp->mdio_bus);
1151 mdiobus_free(tp->mdio_bus);
1155 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1159 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1163 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1167 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1168 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1172 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1178 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1182 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1186 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1190 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1191 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1195 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1201 /* tp->lock is held. */
1202 static inline void tg3_generate_fw_event(struct tg3 *tp)
1206 val = tr32(GRC_RX_CPU_EVENT);
1207 val |= GRC_RX_CPU_DRIVER_EVENT;
1208 tw32_f(GRC_RX_CPU_EVENT, val);
1210 tp->last_event_jiffies = jiffies;
1213 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1215 /* tp->lock is held. */
1216 static void tg3_wait_for_event_ack(struct tg3 *tp)
1219 unsigned int delay_cnt;
1222 /* If enough time has passed, no wait is necessary. */
1223 time_remain = (long)(tp->last_event_jiffies + 1 +
1224 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1226 if (time_remain < 0)
1229 /* Check if we can shorten the wait time. */
1230 delay_cnt = jiffies_to_usecs(time_remain);
1231 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1232 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1233 delay_cnt = (delay_cnt >> 3) + 1;
1235 for (i = 0; i < delay_cnt; i++) {
1236 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1242 /* tp->lock is held. */
1243 static void tg3_ump_link_report(struct tg3 *tp)
1248 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1249 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1252 tg3_wait_for_event_ack(tp);
1254 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1256 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1259 if (!tg3_readphy(tp, MII_BMCR, &reg))
1261 if (!tg3_readphy(tp, MII_BMSR, &reg))
1262 val |= (reg & 0xffff);
1263 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1266 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1268 if (!tg3_readphy(tp, MII_LPA, &reg))
1269 val |= (reg & 0xffff);
1270 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1273 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1274 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1276 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1277 val |= (reg & 0xffff);
1279 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1281 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1285 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1287 tg3_generate_fw_event(tp);
1290 static void tg3_link_report(struct tg3 *tp)
1292 if (!netif_carrier_ok(tp->dev)) {
1293 netif_info(tp, link, tp->dev, "Link is down\n");
1294 tg3_ump_link_report(tp);
1295 } else if (netif_msg_link(tp)) {
1296 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1297 (tp->link_config.active_speed == SPEED_1000 ?
1299 (tp->link_config.active_speed == SPEED_100 ?
1301 (tp->link_config.active_duplex == DUPLEX_FULL ?
1304 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1305 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1307 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1309 tg3_ump_link_report(tp);
1313 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1317 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1318 miireg = ADVERTISE_PAUSE_CAP;
1319 else if (flow_ctrl & FLOW_CTRL_TX)
1320 miireg = ADVERTISE_PAUSE_ASYM;
1321 else if (flow_ctrl & FLOW_CTRL_RX)
1322 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1329 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1333 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1334 miireg = ADVERTISE_1000XPAUSE;
1335 else if (flow_ctrl & FLOW_CTRL_TX)
1336 miireg = ADVERTISE_1000XPSE_ASYM;
1337 else if (flow_ctrl & FLOW_CTRL_RX)
1338 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
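/* Summary of the advertisement mapping above (illustrative, follows the
 * IEEE 802.3 pause resolution rules):
 *
 *	requested flow control		advertised bits
 *	TX and RX			PAUSE (symmetric)
 *	TX only				ASYM_PAUSE
 *	RX only				PAUSE | ASYM_PAUSE
 *	neither				none
 */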
1345 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1349 if (lcladv & ADVERTISE_1000XPAUSE) {
1350 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1351 if (rmtadv & LPA_1000XPAUSE)
1352 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1353 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1356 if (rmtadv & LPA_1000XPAUSE)
1357 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1359 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1360 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1367 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1371 u32 old_rx_mode = tp->rx_mode;
1372 u32 old_tx_mode = tp->tx_mode;
1374 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1375 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1377 autoneg = tp->link_config.autoneg;
1379 if (autoneg == AUTONEG_ENABLE &&
1380 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1381 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1382 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1384 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1386 flowctrl = tp->link_config.flowctrl;
1388 tp->link_config.active_flowctrl = flowctrl;
1390 if (flowctrl & FLOW_CTRL_RX)
1391 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1393 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1395 if (old_rx_mode != tp->rx_mode)
1396 tw32_f(MAC_RX_MODE, tp->rx_mode);
1398 if (flowctrl & FLOW_CTRL_TX)
1399 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1401 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1403 if (old_tx_mode != tp->tx_mode)
1404 tw32_f(MAC_TX_MODE, tp->tx_mode);
1407 static void tg3_adjust_link(struct net_device *dev)
1409 u8 oldflowctrl, linkmesg = 0;
1410 u32 mac_mode, lcl_adv, rmt_adv;
1411 struct tg3 *tp = netdev_priv(dev);
1412 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1414 spin_lock_bh(&tp->lock);
1416 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1417 MAC_MODE_HALF_DUPLEX);
1419 oldflowctrl = tp->link_config.active_flowctrl;
1425 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1426 mac_mode |= MAC_MODE_PORT_MODE_MII;
1427 else if (phydev->speed == SPEED_1000 ||
1428 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1429 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1431 mac_mode |= MAC_MODE_PORT_MODE_MII;
1433 if (phydev->duplex == DUPLEX_HALF)
1434 mac_mode |= MAC_MODE_HALF_DUPLEX;
1436 lcl_adv = tg3_advert_flowctrl_1000T(
1437 tp->link_config.flowctrl);
1440 rmt_adv = LPA_PAUSE_CAP;
1441 if (phydev->asym_pause)
1442 rmt_adv |= LPA_PAUSE_ASYM;
1445 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1447 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1449 if (mac_mode != tp->mac_mode) {
1450 tp->mac_mode = mac_mode;
1451 tw32_f(MAC_MODE, tp->mac_mode);
1455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1456 if (phydev->speed == SPEED_10)
1458 MAC_MI_STAT_10MBPS_MODE |
1459 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1461 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1464 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1465 tw32(MAC_TX_LENGTHS,
1466 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1467 (6 << TX_LENGTHS_IPG_SHIFT) |
1468 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1470 tw32(MAC_TX_LENGTHS,
1471 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1472 (6 << TX_LENGTHS_IPG_SHIFT) |
1473 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1475 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1476 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1477 phydev->speed != tp->link_config.active_speed ||
1478 phydev->duplex != tp->link_config.active_duplex ||
1479 oldflowctrl != tp->link_config.active_flowctrl)
1482 tp->link_config.active_speed = phydev->speed;
1483 tp->link_config.active_duplex = phydev->duplex;
1485 spin_unlock_bh(&tp->lock);
1488 tg3_link_report(tp);
1491 static int tg3_phy_init(struct tg3 *tp)
1493 struct phy_device *phydev;
1495 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1498 /* Bring the PHY back to a known state. */
1501 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1503 /* Attach the MAC to the PHY. */
1504 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1505 phydev->dev_flags, phydev->interface);
1506 if (IS_ERR(phydev)) {
1507 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1508 return PTR_ERR(phydev);
1511 /* Mask with MAC supported features. */
1512 switch (phydev->interface) {
1513 case PHY_INTERFACE_MODE_GMII:
1514 case PHY_INTERFACE_MODE_RGMII:
1515 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1516 phydev->supported &= (PHY_GBIT_FEATURES |
1518 SUPPORTED_Asym_Pause);
1522 case PHY_INTERFACE_MODE_MII:
1523 phydev->supported &= (PHY_BASIC_FEATURES |
1525 SUPPORTED_Asym_Pause);
1528 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1532 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1534 phydev->advertising = phydev->supported;
1539 static void tg3_phy_start(struct tg3 *tp)
1541 struct phy_device *phydev;
1543 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1549 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1550 phydev->speed = tp->link_config.orig_speed;
1551 phydev->duplex = tp->link_config.orig_duplex;
1552 phydev->autoneg = tp->link_config.orig_autoneg;
1553 phydev->advertising = tp->link_config.orig_advertising;
1558 phy_start_aneg(phydev);
1561 static void tg3_phy_stop(struct tg3 *tp)
1563 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1566 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1569 static void tg3_phy_fini(struct tg3 *tp)
1571 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1572 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1573 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1577 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1581 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1583 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1588 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1592 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1594 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
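/* Illustrative note (not in the original source): the two helpers above use
 * the usual Broadcom indirect DSP access sequence: latch the register number
 * into MII_TG3_DSP_ADDRESS, then move the data through MII_TG3_DSP_RW_PORT,
 * e.g.
 *
 *	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, tap1);
 */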
1599 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1603 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1606 tg3_writephy(tp, MII_TG3_FET_TEST,
1607 phytest | MII_TG3_FET_SHADOW_EN);
1608 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1610 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1612 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1613 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1615 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1619 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1623 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1624 ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
1625 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1628 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1629 tg3_phy_fet_toggle_apd(tp, enable);
1633 reg = MII_TG3_MISC_SHDW_WREN |
1634 MII_TG3_MISC_SHDW_SCR5_SEL |
1635 MII_TG3_MISC_SHDW_SCR5_LPED |
1636 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1637 MII_TG3_MISC_SHDW_SCR5_SDTL |
1638 MII_TG3_MISC_SHDW_SCR5_C125OE;
1639 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1640 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1642 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1645 reg = MII_TG3_MISC_SHDW_WREN |
1646 MII_TG3_MISC_SHDW_APD_SEL |
1647 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1649 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1651 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1654 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1658 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1659 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1662 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1665 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1666 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1668 tg3_writephy(tp, MII_TG3_FET_TEST,
1669 ephy | MII_TG3_FET_SHADOW_EN);
1670 if (!tg3_readphy(tp, reg, &phy)) {
1672 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1674 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1675 tg3_writephy(tp, reg, phy);
1677 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1680 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1681 MII_TG3_AUXCTL_SHDWSEL_MISC;
1682 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1683 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1685 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1687 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1688 phy |= MII_TG3_AUXCTL_MISC_WREN;
1689 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1694 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1698 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1701 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1702 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1703 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1704 (val | (1 << 15) | (1 << 4)));
1707 static void tg3_phy_apply_otp(struct tg3 *tp)
1716 /* Enable SM_DSP clock and tx 6dB coding. */
1717 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1718 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1719 MII_TG3_AUXCTL_ACTL_TX_6DB;
1720 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1722 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1723 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1724 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1726 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1727 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1728 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1730 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1731 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1732 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1734 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1735 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1737 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1738 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1740 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1741 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1742 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1744 /* Turn off SM_DSP clock. */
1745 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1746 MII_TG3_AUXCTL_ACTL_TX_6DB;
1747 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1750 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1754 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1759 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1760 current_link_up == 1 &&
1761 tp->link_config.active_duplex == DUPLEX_FULL &&
1762 (tp->link_config.active_speed == SPEED_100 ||
1763 tp->link_config.active_speed == SPEED_1000)) {
1766 if (tp->link_config.active_speed == SPEED_1000)
1767 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1769 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1771 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1773 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1774 TG3_CL45_D7_EEERES_STAT, &val);
1777 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1778 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1781 case ASIC_REV_57765:
1782 /* Enable SM_DSP clock and tx 6dB coding. */
1783 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1784 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1785 MII_TG3_AUXCTL_ACTL_TX_6DB;
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1788 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1790 /* Turn off SM_DSP clock. */
1791 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1792 MII_TG3_AUXCTL_ACTL_TX_6DB;
1793 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1796 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1801 if (!tp->setlpicnt) {
1802 val = tr32(TG3_CPMU_EEE_MODE);
1803 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1807 static int tg3_wait_macro_done(struct tg3 *tp)
1814 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1815 if ((tmp32 & 0x1000) == 0)
1825 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1827 static const u32 test_pat[4][6] = {
1828 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1829 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1830 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1831 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1835 for (chan = 0; chan < 4; chan++) {
1838 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1839 (chan * 0x2000) | 0x0200);
1840 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1842 for (i = 0; i < 6; i++)
1843 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1846 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1847 if (tg3_wait_macro_done(tp)) {
1852 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1853 (chan * 0x2000) | 0x0200);
1854 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1855 if (tg3_wait_macro_done(tp)) {
1860 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1861 if (tg3_wait_macro_done(tp)) {
1866 for (i = 0; i < 6; i += 2) {
1869 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1870 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1871 tg3_wait_macro_done(tp)) {
1877 if (low != test_pat[chan][i] ||
1878 high != test_pat[chan][i+1]) {
1879 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1880 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1881 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1891 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1895 for (chan = 0; chan < 4; chan++) {
1898 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1899 (chan * 0x2000) | 0x0200);
1900 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1901 for (i = 0; i < 6; i++)
1902 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1903 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1904 if (tg3_wait_macro_done(tp))
1911 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1913 u32 reg32, phy9_orig;
1914 int retries, do_phy_reset, err;
1920 err = tg3_bmcr_reset(tp);
1926 /* Disable transmitter and interrupt. */
1927 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1931 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1933 /* Set full-duplex, 1000 Mbps. */
1934 tg3_writephy(tp, MII_BMCR,
1935 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1937 /* Set to master mode. */
1938 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1941 tg3_writephy(tp, MII_TG3_CTRL,
1942 (MII_TG3_CTRL_AS_MASTER |
1943 MII_TG3_CTRL_ENABLE_AS_MASTER));
1945 /* Enable SM_DSP_CLOCK and 6dB. */
1946 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1948 /* Block the PHY control access. */
1949 tg3_phydsp_write(tp, 0x8005, 0x0800);
1951 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1954 } while (--retries);
1956 err = tg3_phy_reset_chanpat(tp);
1960 tg3_phydsp_write(tp, 0x8005, 0x0000);
1962 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1963 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1967 /* Set Extended packet length bit for jumbo frames */
1968 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1970 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1973 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1975 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1977 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1984 /* This will reset the tigon3 PHY if there is no valid
1985 * link unless the FORCE argument is non-zero.
1987 static int tg3_phy_reset(struct tg3 *tp)
1992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1993 val = tr32(GRC_MISC_CFG);
1994 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1997 err = tg3_readphy(tp, MII_BMSR, &val);
1998 err |= tg3_readphy(tp, MII_BMSR, &val);
2002 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2003 netif_carrier_off(tp->dev);
2004 tg3_link_report(tp);
2007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2010 err = tg3_phy_reset_5703_4_5(tp);
2017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2018 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2019 cpmuctrl = tr32(TG3_CPMU_CTRL);
2020 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2022 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2025 err = tg3_bmcr_reset(tp);
2029 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2030 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2031 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2033 tw32(TG3_CPMU_CTRL, cpmuctrl);
2036 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2037 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2038 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2039 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2040 CPMU_LSPD_1000MB_MACCLK_12_5) {
2041 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2043 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2047 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
2048 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2051 tg3_phy_apply_otp(tp);
2053 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2054 tg3_phy_toggle_apd(tp, true);
2056 tg3_phy_toggle_apd(tp, false);
2059 if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
2060 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2061 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2062 tg3_phydsp_write(tp, 0x000a, 0x0323);
2063 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2065 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2066 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2067 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2069 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2070 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2071 tg3_phydsp_write(tp, 0x000a, 0x310b);
2072 tg3_phydsp_write(tp, 0x201f, 0x9506);
2073 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2074 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2075 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2076 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2077 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2078 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2079 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2080 tg3_writephy(tp, MII_TG3_TEST1,
2081 MII_TG3_TEST1_TRIM_EN | 0x4);
2083 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2084 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2086 /* Set Extended packet length bit (bit 14) on all chips that */
2087 /* support jumbo frames */
2088 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2089 /* Cannot do read-modify-write on 5401 */
2090 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2091 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2092 /* Set bit 14 with read-modify-write to preserve other bits */
2093 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2094 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
2095 tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
2098 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2099 * jumbo frames transmission.
2101 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2102 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2104 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2108 /* adjust output voltage */
2109 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2112 tg3_phy_toggle_automdix(tp, 1);
2113 tg3_phy_set_wirespeed(tp);
2117 static void tg3_frob_aux_power(struct tg3 *tp)
2119 bool need_vaux = false;
2121 /* The GPIOs do something completely different on 57765. */
2122 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2127 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2131 tp->pdev_peer != tp->pdev) {
2132 struct net_device *dev_peer;
2134 dev_peer = pci_get_drvdata(tp->pdev_peer);
2136 /* remove_one() may have been run on the peer. */
2138 struct tg3 *tp_peer = netdev_priv(dev_peer);
2140 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
2143 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2144 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
2149 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2150 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2156 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2157 (GRC_LCLCTRL_GPIO_OE0 |
2158 GRC_LCLCTRL_GPIO_OE1 |
2159 GRC_LCLCTRL_GPIO_OE2 |
2160 GRC_LCLCTRL_GPIO_OUTPUT0 |
2161 GRC_LCLCTRL_GPIO_OUTPUT1),
2163 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2164 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2165 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2166 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2167 GRC_LCLCTRL_GPIO_OE1 |
2168 GRC_LCLCTRL_GPIO_OE2 |
2169 GRC_LCLCTRL_GPIO_OUTPUT0 |
2170 GRC_LCLCTRL_GPIO_OUTPUT1 |
2172 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2174 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2175 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2177 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2178 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2181 u32 grc_local_ctrl = 0;
2183 /* Workaround to prevent overdrawing Amps. */
2184 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2186 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2187 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2188 grc_local_ctrl, 100);
2191 /* On 5753 and variants, GPIO2 cannot be used. */
2192 no_gpio2 = tp->nic_sram_data_cfg &
2193 NIC_SRAM_DATA_CFG_NO_GPIO2;
2195 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2196 GRC_LCLCTRL_GPIO_OE1 |
2197 GRC_LCLCTRL_GPIO_OE2 |
2198 GRC_LCLCTRL_GPIO_OUTPUT1 |
2199 GRC_LCLCTRL_GPIO_OUTPUT2;
2201 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2202 GRC_LCLCTRL_GPIO_OUTPUT2);
2204 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2205 grc_local_ctrl, 100);
2207 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2209 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2210 grc_local_ctrl, 100);
2213 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2214 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2215 grc_local_ctrl, 100);
2219 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2220 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2221 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2222 (GRC_LCLCTRL_GPIO_OE1 |
2223 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2225 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2226 GRC_LCLCTRL_GPIO_OE1, 100);
2228 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2229 (GRC_LCLCTRL_GPIO_OE1 |
2230 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2235 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2237 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2239 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2240 if (speed != SPEED_10)
2242 } else if (speed == SPEED_10)
2248 static int tg3_setup_phy(struct tg3 *, int);
2250 #define RESET_KIND_SHUTDOWN 0
2251 #define RESET_KIND_INIT 1
2252 #define RESET_KIND_SUSPEND 2
2254 static void tg3_write_sig_post_reset(struct tg3 *, int);
2255 static int tg3_halt_cpu(struct tg3 *, u32);
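/* Quiesce the PHY (or SERDES block) before entering a low-power state.
 * Chips with known power-down bugs are left alone; others get their
 * MAC clock slowed and BMCR_PDOWN set.
 */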
2257 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2261 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2263 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2264 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
sg_dig_ctrl |= SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2268 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2269 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2276 val = tr32(GRC_MISC_CFG);
2277 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2280 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2282 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2285 tg3_writephy(tp, MII_ADVERTISE, 0);
2286 tg3_writephy(tp, MII_BMCR,
2287 BMCR_ANENABLE | BMCR_ANRESTART);
2289 tg3_writephy(tp, MII_TG3_FET_TEST,
2290 phytest | MII_TG3_FET_SHADOW_EN);
2291 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2292 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2294 MII_TG3_FET_SHDW_AUXMODE4,
2297 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2300 } else if (do_low_power) {
2301 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2302 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2304 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2305 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2306 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2307 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2308 MII_TG3_AUXCTL_PCTL_VREG_11V);
/* The PHY should not be powered down on some chips because
 * of bugs.
 */
2314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2317 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2320 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2321 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2322 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2323 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2324 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2325 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2328 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
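/* NVRAM access is arbitrated with the firmware through the SWARB
 * register; tg3_nvram_lock()/tg3_nvram_unlock() below implement a
 * nestable request/grant handshake around it.
 */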
2331 /* tp->lock is held. */
2332 static int tg3_nvram_lock(struct tg3 *tp)
2334 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2337 if (tp->nvram_lock_cnt == 0) {
2338 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2339 for (i = 0; i < 8000; i++) {
2340 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2345 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2349 tp->nvram_lock_cnt++;
2354 /* tp->lock is held. */
2355 static void tg3_nvram_unlock(struct tg3 *tp)
2357 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2358 if (tp->nvram_lock_cnt > 0)
2359 tp->nvram_lock_cnt--;
2360 if (tp->nvram_lock_cnt == 0)
2361 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2365 /* tp->lock is held. */
2366 static void tg3_enable_nvram_access(struct tg3 *tp)
2368 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2369 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2370 u32 nvaccess = tr32(NVRAM_ACCESS);
2372 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2376 /* tp->lock is held. */
2377 static void tg3_disable_nvram_access(struct tg3 *tp)
2379 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2380 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2381 u32 nvaccess = tr32(NVRAM_ACCESS);
2383 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
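/* Fallback path for parts without an NVRAM interface: read one 32-bit
 * word through the legacy GRC_EEPROM_ADDR/GRC_EEPROM_DATA registers,
 * polling for EEPROM_ADDR_COMPLETE.
 */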
2387 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2388 u32 offset, u32 *val)
2393 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2396 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2397 EEPROM_ADDR_DEVID_MASK |
2399 tw32(GRC_EEPROM_ADDR,
2401 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2402 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2403 EEPROM_ADDR_ADDR_MASK) |
2404 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2406 for (i = 0; i < 1000; i++) {
2407 tmp = tr32(GRC_EEPROM_ADDR);
2409 if (tmp & EEPROM_ADDR_COMPLETE)
2413 if (!(tmp & EEPROM_ADDR_COMPLETE))
2416 tmp = tr32(GRC_EEPROM_DATA);
/* The data will always be opposite the native endian
 * format.  Perform a blind byteswap to compensate.
 */
*val = swab32(tmp);

return 0;
2427 #define NVRAM_CMD_TIMEOUT 10000
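/* Issue a command to the NVRAM controller and poll (up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE.
 */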
2429 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2433 tw32(NVRAM_CMD, nvram_cmd);
2434 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2436 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
if (i == NVRAM_CMD_TIMEOUT)
	return -EBUSY;

return 0;
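/* Translate a linear NVRAM offset into the page/offset form used by
 * Atmel AT45DB0X1B parts, which address in 264-byte pages rather than
 * a flat byte space.  For example (assuming a 264-byte page size and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): offset 1000 lives in page 3 at
 * byte 208, so the physical address becomes (3 << 9) + 208 = 1744.
 */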
2448 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2450 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2451 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2452 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2453 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2454 (tp->nvram_jedecnum == JEDEC_ATMEL))
2456 addr = ((addr / tp->nvram_pagesize) <<
2457 ATMEL_AT45DB0X1B_PAGE_POS) +
2458 (addr % tp->nvram_pagesize);
2463 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2465 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2466 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2467 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2468 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2469 (tp->nvram_jedecnum == JEDEC_ATMEL))
2471 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2472 tp->nvram_pagesize) +
2473 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2478 /* NOTE: Data read in from NVRAM is byteswapped according to
2479 * the byteswapping settings for all other register accesses.
2480 * tg3 devices are BE devices, so on a BE machine, the data
2481 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2484 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2488 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2489 return tg3_nvram_read_using_eeprom(tp, offset, val);
2491 offset = tg3_nvram_phys_addr(tp, offset);
2493 if (offset > NVRAM_ADDR_MSK)
2496 ret = tg3_nvram_lock(tp);
2500 tg3_enable_nvram_access(tp);
2502 tw32(NVRAM_ADDR, offset);
2503 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2504 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2507 *val = tr32(NVRAM_RDDATA);
2509 tg3_disable_nvram_access(tp);
2511 tg3_nvram_unlock(tp);
2516 /* Ensures NVRAM data is in bytestream format. */
2517 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
u32 v;
int res = tg3_nvram_read(tp, offset, &v);

if (!res)
	*val = cpu_to_be32(v);

return res;
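/* Program the station address into all four MAC_ADDR slots (and the
 * extended slots on 5703/5704), then reseed the transmit backoff
 * generator from the address bytes.
 */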
2526 /* tp->lock is held. */
2527 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2529 u32 addr_high, addr_low;
2532 addr_high = ((tp->dev->dev_addr[0] << 8) |
2533 tp->dev->dev_addr[1]);
2534 addr_low = ((tp->dev->dev_addr[2] << 24) |
2535 (tp->dev->dev_addr[3] << 16) |
2536 (tp->dev->dev_addr[4] << 8) |
2537 (tp->dev->dev_addr[5] << 0));
2538 for (i = 0; i < 4; i++) {
2539 if (i == 1 && skip_mac_1)
2541 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2542 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2547 for (i = 0; i < 12; i++) {
2548 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2549 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2553 addr_high = (tp->dev->dev_addr[0] +
2554 tp->dev->dev_addr[1] +
2555 tp->dev->dev_addr[2] +
2556 tp->dev->dev_addr[3] +
2557 tp->dev->dev_addr[4] +
2558 tp->dev->dev_addr[5]) &
2559 TX_BACKOFF_SEED_MASK;
2560 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2563 static void tg3_enable_register_access(struct tg3 *tp)
/*
 * Make sure register accesses (indirect or otherwise) will function
 * correctly.
 */
2569 pci_write_config_dword(tp->pdev,
2570 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2573 static int tg3_power_up(struct tg3 *tp)
2575 tg3_enable_register_access(tp);
2577 pci_set_power_state(tp->pdev, PCI_D0);
2579 /* Switch out of Vaux if it is a NIC */
2580 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2581 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
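/* Prepare the chip for entry into D3: restore CLKREQ, mask PCI
 * interrupts, drop the link to 10/100 for WoL where required, switch
 * the MAC into its wake-up mode and gate the core clocks.
 */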
2586 static int tg3_power_down_prepare(struct tg3 *tp)
2589 bool device_should_wake, do_low_power;
2591 tg3_enable_register_access(tp);
2593 /* Restore the CLKREQ setting. */
2594 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2597 pci_read_config_word(tp->pdev,
2598 tp->pcie_cap + PCI_EXP_LNKCTL,
2600 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2601 pci_write_config_word(tp->pdev,
2602 tp->pcie_cap + PCI_EXP_LNKCTL,
2606 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2607 tw32(TG3PCI_MISC_HOST_CTRL,
2608 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2610 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2611 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2613 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2614 do_low_power = false;
2615 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2616 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2617 struct phy_device *phydev;
2618 u32 phyid, advertising;
2620 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2622 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2624 tp->link_config.orig_speed = phydev->speed;
2625 tp->link_config.orig_duplex = phydev->duplex;
2626 tp->link_config.orig_autoneg = phydev->autoneg;
2627 tp->link_config.orig_advertising = phydev->advertising;
2629 advertising = ADVERTISED_TP |
2631 ADVERTISED_Autoneg |
2632 ADVERTISED_10baseT_Half;
2634 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2635 device_should_wake) {
2636 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2638 ADVERTISED_100baseT_Half |
2639 ADVERTISED_100baseT_Full |
2640 ADVERTISED_10baseT_Full;
2642 advertising |= ADVERTISED_10baseT_Full;
2645 phydev->advertising = advertising;
2647 phy_start_aneg(phydev);
2649 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2650 if (phyid != PHY_ID_BCMAC131) {
2651 phyid &= PHY_BCM_OUI_MASK;
2652 if (phyid == PHY_BCM_OUI_1 ||
2653 phyid == PHY_BCM_OUI_2 ||
2654 phyid == PHY_BCM_OUI_3)
2655 do_low_power = true;
2659 do_low_power = true;
2661 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2662 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2663 tp->link_config.orig_speed = tp->link_config.speed;
2664 tp->link_config.orig_duplex = tp->link_config.duplex;
2665 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2668 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2669 tp->link_config.speed = SPEED_10;
2670 tp->link_config.duplex = DUPLEX_HALF;
2671 tp->link_config.autoneg = AUTONEG_ENABLE;
2672 tg3_setup_phy(tp, 0);
2676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2679 val = tr32(GRC_VCPU_EXT_CTRL);
2680 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2681 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2685 for (i = 0; i < 200; i++) {
2686 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2687 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2692 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2693 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2694 WOL_DRV_STATE_SHUTDOWN |
2698 if (device_should_wake) {
2701 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2703 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2707 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2708 mac_mode = MAC_MODE_PORT_MODE_GMII;
2710 mac_mode = MAC_MODE_PORT_MODE_MII;
2712 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2713 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2715 u32 speed = (tp->tg3_flags &
2716 TG3_FLAG_WOL_SPEED_100MB) ?
2717 SPEED_100 : SPEED_10;
2718 if (tg3_5700_link_polarity(tp, speed))
2719 mac_mode |= MAC_MODE_LINK_POLARITY;
2721 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2724 mac_mode = MAC_MODE_PORT_MODE_TBI;
2727 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2728 tw32(MAC_LED_CTRL, tp->led_ctrl);
2730 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2731 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2732 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2733 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2734 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2735 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2737 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2738 mac_mode |= MAC_MODE_APE_TX_EN |
2739 MAC_MODE_APE_RX_EN |
2740 MAC_MODE_TDE_ENABLE;
2742 tw32_f(MAC_MODE, mac_mode);
2745 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2749 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2750 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2754 base_val = tp->pci_clock_ctrl;
2755 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2756 CLOCK_CTRL_TXCLK_DISABLE);
2758 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2759 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2760 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2761 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2762 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2764 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2765 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2766 u32 newbits1, newbits2;
2768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2770 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2771 CLOCK_CTRL_TXCLK_DISABLE |
2773 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2774 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2775 newbits1 = CLOCK_CTRL_625_CORE;
2776 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2778 newbits1 = CLOCK_CTRL_ALTCLK;
2779 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2782 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2785 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2788 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2793 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2794 CLOCK_CTRL_TXCLK_DISABLE |
2795 CLOCK_CTRL_44MHZ_CORE);
2797 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2800 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2801 tp->pci_clock_ctrl | newbits3, 40);
2805 if (!(device_should_wake) &&
2806 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2807 tg3_power_down_phy(tp, do_low_power);
2809 tg3_frob_aux_power(tp);
2811 /* Workaround for unstable PLL clock */
2812 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2813 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2814 u32 val = tr32(0x7d00);
2816 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2818 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2821 err = tg3_nvram_lock(tp);
2822 tg3_halt_cpu(tp, RX_CPU_BASE);
2824 tg3_nvram_unlock(tp);
2828 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
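/* Final power-down: run the preparation sequence, arm PCI wake if WoL
 * is enabled, then put the device into D3hot.
 */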
2833 static void tg3_power_down(struct tg3 *tp)
2835 tg3_power_down_prepare(tp);
2837 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2838 pci_set_power_state(tp->pdev, PCI_D3hot);
2841 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2843 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2844 case MII_TG3_AUX_STAT_10HALF:
2846 *duplex = DUPLEX_HALF;
2849 case MII_TG3_AUX_STAT_10FULL:
2851 *duplex = DUPLEX_FULL;
2854 case MII_TG3_AUX_STAT_100HALF:
2856 *duplex = DUPLEX_HALF;
2859 case MII_TG3_AUX_STAT_100FULL:
2861 *duplex = DUPLEX_FULL;
2864 case MII_TG3_AUX_STAT_1000HALF:
2865 *speed = SPEED_1000;
2866 *duplex = DUPLEX_HALF;
2869 case MII_TG3_AUX_STAT_1000FULL:
2870 *speed = SPEED_1000;
2871 *duplex = DUPLEX_FULL;
2875 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2876 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2878 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2882 *speed = SPEED_INVALID;
2883 *duplex = DUPLEX_INVALID;
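/* (Re)start link negotiation on a copper PHY: program the advertisement
 * registers (or a forced speed/duplex), set up EEE where supported, and
 * kick autonegotiation.
 */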
2888 static void tg3_phy_copper_begin(struct tg3 *tp)
2893 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2894 /* Entering low power mode. Disable gigabit and
2895 * 100baseT advertisements.
2897 tg3_writephy(tp, MII_TG3_CTRL, 0);
2899 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2900 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2901 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2902 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2904 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2905 } else if (tp->link_config.speed == SPEED_INVALID) {
2906 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2907 tp->link_config.advertising &=
2908 ~(ADVERTISED_1000baseT_Half |
2909 ADVERTISED_1000baseT_Full);
2911 new_adv = ADVERTISE_CSMA;
2912 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2913 new_adv |= ADVERTISE_10HALF;
2914 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2915 new_adv |= ADVERTISE_10FULL;
2916 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2917 new_adv |= ADVERTISE_100HALF;
2918 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2919 new_adv |= ADVERTISE_100FULL;
2921 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2923 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2925 if (tp->link_config.advertising &
2926 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2928 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2929 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2930 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2931 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2932 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2933 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2934 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2935 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2936 MII_TG3_CTRL_ENABLE_AS_MASTER);
2937 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2939 tg3_writephy(tp, MII_TG3_CTRL, 0);
2942 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2943 new_adv |= ADVERTISE_CSMA;
2945 /* Asking for a specific link mode. */
2946 if (tp->link_config.speed == SPEED_1000) {
2947 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2949 if (tp->link_config.duplex == DUPLEX_FULL)
2950 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2952 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2953 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2954 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2955 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2956 MII_TG3_CTRL_ENABLE_AS_MASTER);
2958 if (tp->link_config.speed == SPEED_100) {
2959 if (tp->link_config.duplex == DUPLEX_FULL)
2960 new_adv |= ADVERTISE_100FULL;
2962 new_adv |= ADVERTISE_100HALF;
2964 if (tp->link_config.duplex == DUPLEX_FULL)
2965 new_adv |= ADVERTISE_10FULL;
2967 new_adv |= ADVERTISE_10HALF;
2969 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2974 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2977 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2980 tw32(TG3_CPMU_EEE_MODE,
2981 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2983 /* Enable SM_DSP clock and tx 6dB coding. */
2984 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2985 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2986 MII_TG3_AUXCTL_ACTL_TX_6DB;
2987 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2989 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2991 case ASIC_REV_57765:
2992 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2993 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2994 MII_TG3_DSP_CH34TP2_HIBW01);
2997 val = MII_TG3_DSP_TAP26_ALNOKO |
2998 MII_TG3_DSP_TAP26_RMRXSTO |
2999 MII_TG3_DSP_TAP26_OPCSINPT;
3000 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3004 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3005 /* Advertise 100-BaseTX EEE ability */
3006 if (tp->link_config.advertising &
3007 ADVERTISED_100baseT_Full)
3008 val |= MDIO_AN_EEE_ADV_100TX;
3009 /* Advertise 1000-BaseT EEE ability */
3010 if (tp->link_config.advertising &
3011 ADVERTISED_1000baseT_Full)
3012 val |= MDIO_AN_EEE_ADV_1000T;
3014 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3016 /* Turn off SM_DSP clock. */
3017 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3018 MII_TG3_AUXCTL_ACTL_TX_6DB;
3019 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3022 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3023 tp->link_config.speed != SPEED_INVALID) {
3024 u32 bmcr, orig_bmcr;
3026 tp->link_config.active_speed = tp->link_config.speed;
3027 tp->link_config.active_duplex = tp->link_config.duplex;
3030 switch (tp->link_config.speed) {
3036 bmcr |= BMCR_SPEED100;
3040 bmcr |= TG3_BMCR_SPEED1000;
3044 if (tp->link_config.duplex == DUPLEX_FULL)
3045 bmcr |= BMCR_FULLDPLX;
3047 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3048 (bmcr != orig_bmcr)) {
3049 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3050 for (i = 0; i < 1500; i++) {
3054 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3055 tg3_readphy(tp, MII_BMSR, &tmp))
3057 if (!(tmp & BMSR_LSTATUS)) {
3062 tg3_writephy(tp, MII_BMCR, bmcr);
3066 tg3_writephy(tp, MII_BMCR,
3067 BMCR_ANENABLE | BMCR_ANRESTART);
3071 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3075 /* Turn off tap power management. */
3076 /* Set Extended packet length bit */
3077 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
3079 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3080 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3081 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3082 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3083 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
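/* Return nonzero only if the MII advertisement registers already cover
 * every mode requested in @mask; used to decide whether a renegotiation
 * is needed.
 */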
3090 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3092 u32 adv_reg, all_mask = 0;
3094 if (mask & ADVERTISED_10baseT_Half)
3095 all_mask |= ADVERTISE_10HALF;
3096 if (mask & ADVERTISED_10baseT_Full)
3097 all_mask |= ADVERTISE_10FULL;
3098 if (mask & ADVERTISED_100baseT_Half)
3099 all_mask |= ADVERTISE_100HALF;
3100 if (mask & ADVERTISED_100baseT_Full)
3101 all_mask |= ADVERTISE_100FULL;
3103 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3106 if ((adv_reg & all_mask) != all_mask)
3108 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3112 if (mask & ADVERTISED_1000baseT_Half)
3113 all_mask |= ADVERTISE_1000HALF;
3114 if (mask & ADVERTISED_1000baseT_Full)
3115 all_mask |= ADVERTISE_1000FULL;
3117 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3120 if ((tg3_ctrl & all_mask) != all_mask)
3126 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3130 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3133 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3134 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3136 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3137 if (curadv != reqadv)
3140 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3141 tg3_readphy(tp, MII_LPA, rmtadv);
3143 /* Reprogram the advertisement register, even if it
3144 * does not affect the current link. If the link
3145 * gets renegotiated in the future, we can save an
3146 * additional renegotiation cycle by advertising
 * it correctly in the first place.
 */
3149 if (curadv != reqadv) {
3150 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3151 ADVERTISE_PAUSE_ASYM);
3152 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
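/* Bring up (or re-check) the link on a copper PHY: apply chip-specific
 * PHY workarounds, read back speed/duplex from AUX_STAT, program the
 * MAC mode and flow control, and report carrier changes.
 */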
3159 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3161 int current_link_up;
3163 u32 lcl_adv, rmt_adv;
3171 (MAC_STATUS_SYNC_CHANGED |
3172 MAC_STATUS_CFG_CHANGED |
3173 MAC_STATUS_MI_COMPLETION |
3174 MAC_STATUS_LNKSTATE_CHANGED));
3177 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3179 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3183 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
/* Some third-party PHYs need to be reset on link going
 * down.
 */
3188 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3191 netif_carrier_ok(tp->dev)) {
3192 tg3_readphy(tp, MII_BMSR, &bmsr);
3193 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3194 !(bmsr & BMSR_LSTATUS))
3200 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3201 tg3_readphy(tp, MII_BMSR, &bmsr);
3202 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3203 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3206 if (!(bmsr & BMSR_LSTATUS)) {
3207 err = tg3_init_5401phy_dsp(tp);
3211 tg3_readphy(tp, MII_BMSR, &bmsr);
3212 for (i = 0; i < 1000; i++) {
3214 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3215 (bmsr & BMSR_LSTATUS)) {
3221 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3222 TG3_PHY_REV_BCM5401_B0 &&
3223 !(bmsr & BMSR_LSTATUS) &&
3224 tp->link_config.active_speed == SPEED_1000) {
3225 err = tg3_phy_reset(tp);
3227 err = tg3_init_5401phy_dsp(tp);
3232 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3233 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3234 /* 5701 {A0,B0} CRC bug workaround */
3235 tg3_writephy(tp, 0x15, 0x0a75);
3236 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3237 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3238 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3241 /* Clear pending interrupts... */
3242 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3243 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3245 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3246 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3247 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3248 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3252 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3253 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3254 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3256 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3259 current_link_up = 0;
3260 current_speed = SPEED_INVALID;
3261 current_duplex = DUPLEX_INVALID;
3263 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3264 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3265 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3266 if (!(val & (1 << 10))) {
3268 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3274 for (i = 0; i < 100; i++) {
3275 tg3_readphy(tp, MII_BMSR, &bmsr);
3276 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3277 (bmsr & BMSR_LSTATUS))
3282 if (bmsr & BMSR_LSTATUS) {
3285 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3286 for (i = 0; i < 2000; i++) {
3288 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3293 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3298 for (i = 0; i < 200; i++) {
3299 tg3_readphy(tp, MII_BMCR, &bmcr);
3300 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3302 if (bmcr && bmcr != 0x7fff)
3310 tp->link_config.active_speed = current_speed;
3311 tp->link_config.active_duplex = current_duplex;
3313 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3314 if ((bmcr & BMCR_ANENABLE) &&
3315 tg3_copper_is_advertising_all(tp,
3316 tp->link_config.advertising)) {
3317 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3319 current_link_up = 1;
3322 if (!(bmcr & BMCR_ANENABLE) &&
3323 tp->link_config.speed == current_speed &&
3324 tp->link_config.duplex == current_duplex &&
3325 tp->link_config.flowctrl ==
3326 tp->link_config.active_flowctrl) {
3327 current_link_up = 1;
3331 if (current_link_up == 1 &&
3332 tp->link_config.active_duplex == DUPLEX_FULL)
3333 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3337 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3338 tg3_phy_copper_begin(tp);
3340 tg3_readphy(tp, MII_BMSR, &bmsr);
3341 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3342 (bmsr & BMSR_LSTATUS))
3343 current_link_up = 1;
3346 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3347 if (current_link_up == 1) {
3348 if (tp->link_config.active_speed == SPEED_100 ||
3349 tp->link_config.active_speed == SPEED_10)
3350 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3352 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3353 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3354 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3356 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3358 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3359 if (tp->link_config.active_duplex == DUPLEX_HALF)
3360 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3363 if (current_link_up == 1 &&
3364 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3365 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3367 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3370 /* ??? Without this setting Netgear GA302T PHY does not
 * ??? send/receive packets...
 */
3373 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3374 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3375 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3376 tw32_f(MAC_MI_MODE, tp->mi_mode);
3380 tw32_f(MAC_MODE, tp->mac_mode);
3383 tg3_phy_eee_adjust(tp, current_link_up);
3385 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3386 /* Polled via timer. */
3387 tw32_f(MAC_EVENT, 0);
3389 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3394 current_link_up == 1 &&
3395 tp->link_config.active_speed == SPEED_1000 &&
3396 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3397 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3400 (MAC_STATUS_SYNC_CHANGED |
3401 MAC_STATUS_CFG_CHANGED));
3404 NIC_SRAM_FIRMWARE_MBOX,
3405 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3408 /* Prevent send BD corruption. */
3409 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3410 u16 oldlnkctl, newlnkctl;
3412 pci_read_config_word(tp->pdev,
3413 tp->pcie_cap + PCI_EXP_LNKCTL,
3415 if (tp->link_config.active_speed == SPEED_100 ||
3416 tp->link_config.active_speed == SPEED_10)
3417 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3419 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3420 if (newlnkctl != oldlnkctl)
3421 pci_write_config_word(tp->pdev,
3422 tp->pcie_cap + PCI_EXP_LNKCTL,
3426 if (current_link_up != netif_carrier_ok(tp->dev)) {
3427 if (current_link_up)
3428 netif_carrier_on(tp->dev);
3430 netif_carrier_off(tp->dev);
3431 tg3_link_report(tp);
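/* Software 1000BASE-X autonegotiation state machine, used on fiber
 * parts that lack (or cannot use) the hardware SG_DIG autoneg block.
 * The MR_* flags correspond to the IEEE 802.3 Clause 37 management
 * variables.
 */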
3437 struct tg3_fiber_aneginfo {
3439 #define ANEG_STATE_UNKNOWN 0
3440 #define ANEG_STATE_AN_ENABLE 1
3441 #define ANEG_STATE_RESTART_INIT 2
3442 #define ANEG_STATE_RESTART 3
3443 #define ANEG_STATE_DISABLE_LINK_OK 4
3444 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3445 #define ANEG_STATE_ABILITY_DETECT 6
3446 #define ANEG_STATE_ACK_DETECT_INIT 7
3447 #define ANEG_STATE_ACK_DETECT 8
3448 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3449 #define ANEG_STATE_COMPLETE_ACK 10
3450 #define ANEG_STATE_IDLE_DETECT_INIT 11
3451 #define ANEG_STATE_IDLE_DETECT 12
3452 #define ANEG_STATE_LINK_OK 13
3453 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3454 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3457 #define MR_AN_ENABLE 0x00000001
3458 #define MR_RESTART_AN 0x00000002
3459 #define MR_AN_COMPLETE 0x00000004
3460 #define MR_PAGE_RX 0x00000008
3461 #define MR_NP_LOADED 0x00000010
3462 #define MR_TOGGLE_TX 0x00000020
3463 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3464 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3465 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3466 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3467 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3468 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3469 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3470 #define MR_TOGGLE_RX 0x00002000
3471 #define MR_NP_RX 0x00004000
3473 #define MR_LINK_OK 0x80000000
3475 unsigned long link_time, cur_time;
3477 u32 ability_match_cfg;
3478 int ability_match_count;
3480 char ability_match, idle_match, ack_match;
3482 u32 txconfig, rxconfig;
3483 #define ANEG_CFG_NP 0x00000080
3484 #define ANEG_CFG_ACK 0x00000040
3485 #define ANEG_CFG_RF2 0x00000020
3486 #define ANEG_CFG_RF1 0x00000010
3487 #define ANEG_CFG_PS2 0x00000001
3488 #define ANEG_CFG_PS1 0x00008000
3489 #define ANEG_CFG_HD 0x00004000
3490 #define ANEG_CFG_FD 0x00002000
3491 #define ANEG_CFG_INVAL 0x00001f06
3496 #define ANEG_TIMER_ENAB 2
3497 #define ANEG_FAILED -1
3499 #define ANEG_STATE_SETTLE_TIME 10000
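/* Advance the fiber autoneg state machine by one step; the return value
 * (ANEG_DONE, ANEG_TIMER_ENAB, ANEG_FAILED, ...) tells the caller in
 * fiber_autoneg() whether to keep ticking.
 */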
3501 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3502 struct tg3_fiber_aneginfo *ap)
3505 unsigned long delta;
3509 if (ap->state == ANEG_STATE_UNKNOWN) {
3513 ap->ability_match_cfg = 0;
3514 ap->ability_match_count = 0;
3515 ap->ability_match = 0;
3521 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3522 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3524 if (rx_cfg_reg != ap->ability_match_cfg) {
3525 ap->ability_match_cfg = rx_cfg_reg;
3526 ap->ability_match = 0;
3527 ap->ability_match_count = 0;
3529 if (++ap->ability_match_count > 1) {
3530 ap->ability_match = 1;
3531 ap->ability_match_cfg = rx_cfg_reg;
3534 if (rx_cfg_reg & ANEG_CFG_ACK)
3542 ap->ability_match_cfg = 0;
3543 ap->ability_match_count = 0;
3544 ap->ability_match = 0;
3550 ap->rxconfig = rx_cfg_reg;
3553 switch (ap->state) {
3554 case ANEG_STATE_UNKNOWN:
3555 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3556 ap->state = ANEG_STATE_AN_ENABLE;
3559 case ANEG_STATE_AN_ENABLE:
3560 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3561 if (ap->flags & MR_AN_ENABLE) {
3564 ap->ability_match_cfg = 0;
3565 ap->ability_match_count = 0;
3566 ap->ability_match = 0;
3570 ap->state = ANEG_STATE_RESTART_INIT;
3572 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3576 case ANEG_STATE_RESTART_INIT:
3577 ap->link_time = ap->cur_time;
3578 ap->flags &= ~(MR_NP_LOADED);
3580 tw32(MAC_TX_AUTO_NEG, 0);
3581 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3582 tw32_f(MAC_MODE, tp->mac_mode);
3585 ret = ANEG_TIMER_ENAB;
3586 ap->state = ANEG_STATE_RESTART;
3589 case ANEG_STATE_RESTART:
3590 delta = ap->cur_time - ap->link_time;
3591 if (delta > ANEG_STATE_SETTLE_TIME)
3592 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3594 ret = ANEG_TIMER_ENAB;
3597 case ANEG_STATE_DISABLE_LINK_OK:
3601 case ANEG_STATE_ABILITY_DETECT_INIT:
3602 ap->flags &= ~(MR_TOGGLE_TX);
3603 ap->txconfig = ANEG_CFG_FD;
3604 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3605 if (flowctrl & ADVERTISE_1000XPAUSE)
3606 ap->txconfig |= ANEG_CFG_PS1;
3607 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3608 ap->txconfig |= ANEG_CFG_PS2;
3609 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3610 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3611 tw32_f(MAC_MODE, tp->mac_mode);
3614 ap->state = ANEG_STATE_ABILITY_DETECT;
3617 case ANEG_STATE_ABILITY_DETECT:
3618 if (ap->ability_match != 0 && ap->rxconfig != 0)
3619 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3622 case ANEG_STATE_ACK_DETECT_INIT:
3623 ap->txconfig |= ANEG_CFG_ACK;
3624 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3625 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3626 tw32_f(MAC_MODE, tp->mac_mode);
3629 ap->state = ANEG_STATE_ACK_DETECT;
3632 case ANEG_STATE_ACK_DETECT:
3633 if (ap->ack_match != 0) {
3634 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3635 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3636 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3638 ap->state = ANEG_STATE_AN_ENABLE;
3640 } else if (ap->ability_match != 0 &&
3641 ap->rxconfig == 0) {
3642 ap->state = ANEG_STATE_AN_ENABLE;
3646 case ANEG_STATE_COMPLETE_ACK_INIT:
3647 if (ap->rxconfig & ANEG_CFG_INVAL) {
3651 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3652 MR_LP_ADV_HALF_DUPLEX |
3653 MR_LP_ADV_SYM_PAUSE |
3654 MR_LP_ADV_ASYM_PAUSE |
3655 MR_LP_ADV_REMOTE_FAULT1 |
3656 MR_LP_ADV_REMOTE_FAULT2 |
3657 MR_LP_ADV_NEXT_PAGE |
3660 if (ap->rxconfig & ANEG_CFG_FD)
3661 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3662 if (ap->rxconfig & ANEG_CFG_HD)
3663 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3664 if (ap->rxconfig & ANEG_CFG_PS1)
3665 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3666 if (ap->rxconfig & ANEG_CFG_PS2)
3667 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3668 if (ap->rxconfig & ANEG_CFG_RF1)
3669 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3670 if (ap->rxconfig & ANEG_CFG_RF2)
3671 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3672 if (ap->rxconfig & ANEG_CFG_NP)
3673 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3675 ap->link_time = ap->cur_time;
3677 ap->flags ^= (MR_TOGGLE_TX);
3678 if (ap->rxconfig & 0x0008)
3679 ap->flags |= MR_TOGGLE_RX;
3680 if (ap->rxconfig & ANEG_CFG_NP)
3681 ap->flags |= MR_NP_RX;
3682 ap->flags |= MR_PAGE_RX;
3684 ap->state = ANEG_STATE_COMPLETE_ACK;
3685 ret = ANEG_TIMER_ENAB;
3688 case ANEG_STATE_COMPLETE_ACK:
3689 if (ap->ability_match != 0 &&
3690 ap->rxconfig == 0) {
3691 ap->state = ANEG_STATE_AN_ENABLE;
3694 delta = ap->cur_time - ap->link_time;
3695 if (delta > ANEG_STATE_SETTLE_TIME) {
3696 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3697 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3699 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3700 !(ap->flags & MR_NP_RX)) {
3701 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3709 case ANEG_STATE_IDLE_DETECT_INIT:
3710 ap->link_time = ap->cur_time;
3711 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3712 tw32_f(MAC_MODE, tp->mac_mode);
3715 ap->state = ANEG_STATE_IDLE_DETECT;
3716 ret = ANEG_TIMER_ENAB;
3719 case ANEG_STATE_IDLE_DETECT:
3720 if (ap->ability_match != 0 &&
3721 ap->rxconfig == 0) {
3722 ap->state = ANEG_STATE_AN_ENABLE;
3725 delta = ap->cur_time - ap->link_time;
3726 if (delta > ANEG_STATE_SETTLE_TIME) {
3727 /* XXX another gem from the Broadcom driver :( */
3728 ap->state = ANEG_STATE_LINK_OK;
3732 case ANEG_STATE_LINK_OK:
3733 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3737 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3738 /* ??? unimplemented */
3741 case ANEG_STATE_NEXT_PAGE_WAIT:
3742 /* ??? unimplemented */
3753 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3756 struct tg3_fiber_aneginfo aninfo;
3757 int status = ANEG_FAILED;
3761 tw32_f(MAC_TX_AUTO_NEG, 0);
3763 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3764 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3767 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3770 memset(&aninfo, 0, sizeof(aninfo));
3771 aninfo.flags |= MR_AN_ENABLE;
3772 aninfo.state = ANEG_STATE_UNKNOWN;
3773 aninfo.cur_time = 0;
3775 while (++tick < 195000) {
3776 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3777 if (status == ANEG_DONE || status == ANEG_FAILED)
3783 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3784 tw32_f(MAC_MODE, tp->mac_mode);
3787 *txflags = aninfo.txconfig;
3788 *rxflags = aninfo.flags;
3790 if (status == ANEG_DONE &&
3791 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3792 MR_LP_ADV_FULL_DUPLEX)))
3798 static void tg3_init_bcm8002(struct tg3 *tp)
3800 u32 mac_status = tr32(MAC_STATUS);
3803 /* Reset when initting first time or we have a link. */
3804 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3805 !(mac_status & MAC_STATUS_PCS_SYNCED))
3808 /* Set PLL lock range. */
3809 tg3_writephy(tp, 0x16, 0x8007);
3812 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3814 /* Wait for reset to complete. */
3815 /* XXX schedule_timeout() ... */
3816 for (i = 0; i < 500; i++)
3819 /* Config mode; select PMA/Ch 1 regs. */
3820 tg3_writephy(tp, 0x10, 0x8411);
3822 /* Enable auto-lock and comdet, select txclk for tx. */
3823 tg3_writephy(tp, 0x11, 0x0a10);
3825 tg3_writephy(tp, 0x18, 0x00a0);
3826 tg3_writephy(tp, 0x16, 0x41ff);
3828 /* Assert and deassert POR. */
3829 tg3_writephy(tp, 0x13, 0x0400);
3831 tg3_writephy(tp, 0x13, 0x0000);
3833 tg3_writephy(tp, 0x11, 0x0a50);
3835 tg3_writephy(tp, 0x11, 0x0a10);
3837 /* Wait for signal to stabilize */
3838 /* XXX schedule_timeout() ... */
3839 for (i = 0; i < 15000; i++)
/* Deselect the channel register so we can read the PHYID
 * later.
 */
3845 tg3_writephy(tp, 0x10, 0x8011);
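/* Fiber link setup using the hardware SG_DIG autoneg block: program the
 * expected SG_DIG_CTRL value, resolve pause advertisement from
 * SG_DIG_STATUS, and fall back to parallel detection when the partner
 * sends no config words.  Returns nonzero when the link is up.
 */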
3848 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3851 u32 sg_dig_ctrl, sg_dig_status;
3852 u32 serdes_cfg, expected_sg_dig_ctrl;
3853 int workaround, port_a;
3854 int current_link_up;
3857 expected_sg_dig_ctrl = 0;
3860 current_link_up = 0;
3862 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3863 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3865 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3868 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3869 /* preserve bits 20-23 for voltage regulator */
3870 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3873 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3875 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3876 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3878 u32 val = serdes_cfg;
3884 tw32_f(MAC_SERDES_CFG, val);
3887 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3889 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3890 tg3_setup_flow_control(tp, 0, 0);
3891 current_link_up = 1;
3896 /* Want auto-negotiation. */
3897 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3899 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3900 if (flowctrl & ADVERTISE_1000XPAUSE)
3901 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3902 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3903 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3905 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3906 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3907 tp->serdes_counter &&
3908 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3909 MAC_STATUS_RCVD_CFG)) ==
3910 MAC_STATUS_PCS_SYNCED)) {
3911 tp->serdes_counter--;
3912 current_link_up = 1;
3917 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3918 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3920 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3922 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3923 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3924 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3925 MAC_STATUS_SIGNAL_DET)) {
3926 sg_dig_status = tr32(SG_DIG_STATUS);
3927 mac_status = tr32(MAC_STATUS);
3929 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3930 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3931 u32 local_adv = 0, remote_adv = 0;
3933 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3934 local_adv |= ADVERTISE_1000XPAUSE;
3935 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3936 local_adv |= ADVERTISE_1000XPSE_ASYM;
3938 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3939 remote_adv |= LPA_1000XPAUSE;
3940 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3941 remote_adv |= LPA_1000XPAUSE_ASYM;
3943 tg3_setup_flow_control(tp, local_adv, remote_adv);
3944 current_link_up = 1;
3945 tp->serdes_counter = 0;
3946 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3947 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3948 if (tp->serdes_counter)
3949 tp->serdes_counter--;
3952 u32 val = serdes_cfg;
3959 tw32_f(MAC_SERDES_CFG, val);
3962 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3965 /* Link parallel detection - link is up */
3966 /* only if we have PCS_SYNC and not */
3967 /* receiving config code words */
3968 mac_status = tr32(MAC_STATUS);
3969 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3970 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3971 tg3_setup_flow_control(tp, 0, 0);
3972 current_link_up = 1;
3974 TG3_PHYFLG_PARALLEL_DETECT;
3975 tp->serdes_counter =
3976 SERDES_PARALLEL_DET_TIMEOUT;
3978 goto restart_autoneg;
3982 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3983 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987 return current_link_up;
3990 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3992 int current_link_up = 0;
3994 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3997 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3998 u32 txflags, rxflags;
4001 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4002 u32 local_adv = 0, remote_adv = 0;
4004 if (txflags & ANEG_CFG_PS1)
4005 local_adv |= ADVERTISE_1000XPAUSE;
4006 if (txflags & ANEG_CFG_PS2)
4007 local_adv |= ADVERTISE_1000XPSE_ASYM;
4009 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4010 remote_adv |= LPA_1000XPAUSE;
4011 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4012 remote_adv |= LPA_1000XPAUSE_ASYM;
4014 tg3_setup_flow_control(tp, local_adv, remote_adv);
4016 current_link_up = 1;
4018 for (i = 0; i < 30; i++) {
4021 (MAC_STATUS_SYNC_CHANGED |
4022 MAC_STATUS_CFG_CHANGED));
4024 if ((tr32(MAC_STATUS) &
4025 (MAC_STATUS_SYNC_CHANGED |
4026 MAC_STATUS_CFG_CHANGED)) == 0)
4030 mac_status = tr32(MAC_STATUS);
4031 if (current_link_up == 0 &&
4032 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4033 !(mac_status & MAC_STATUS_RCVD_CFG))
4034 current_link_up = 1;
4036 tg3_setup_flow_control(tp, 0, 0);
4038 /* Forcing 1000FD link up. */
4039 current_link_up = 1;
4041 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4044 tw32_f(MAC_MODE, tp->mac_mode);
4049 return current_link_up;
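/* Top-level link setup for TBI/fiber ports: selects hardware or
 * hand-rolled autoneg, updates the link LED and carrier state, and
 * reports flow-control changes.
 */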
4052 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4055 u16 orig_active_speed;
4056 u8 orig_active_duplex;
4058 int current_link_up;
4061 orig_pause_cfg = tp->link_config.active_flowctrl;
4062 orig_active_speed = tp->link_config.active_speed;
4063 orig_active_duplex = tp->link_config.active_duplex;
4065 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
4066 netif_carrier_ok(tp->dev) &&
4067 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
4068 mac_status = tr32(MAC_STATUS);
4069 mac_status &= (MAC_STATUS_PCS_SYNCED |
4070 MAC_STATUS_SIGNAL_DET |
4071 MAC_STATUS_CFG_CHANGED |
4072 MAC_STATUS_RCVD_CFG);
4073 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4074 MAC_STATUS_SIGNAL_DET)) {
4075 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4076 MAC_STATUS_CFG_CHANGED));
4081 tw32_f(MAC_TX_AUTO_NEG, 0);
4083 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4084 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4085 tw32_f(MAC_MODE, tp->mac_mode);
4088 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4089 tg3_init_bcm8002(tp);
4091 /* Enable link change event even when serdes polling. */
4092 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4095 current_link_up = 0;
4096 mac_status = tr32(MAC_STATUS);
4098 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4099 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4101 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4103 tp->napi[0].hw_status->status =
4104 (SD_STATUS_UPDATED |
4105 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4107 for (i = 0; i < 100; i++) {
4108 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4109 MAC_STATUS_CFG_CHANGED));
4111 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4112 MAC_STATUS_CFG_CHANGED |
4113 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4117 mac_status = tr32(MAC_STATUS);
4118 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4119 current_link_up = 0;
4120 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4121 tp->serdes_counter == 0) {
4122 tw32_f(MAC_MODE, (tp->mac_mode |
4123 MAC_MODE_SEND_CONFIGS));
4125 tw32_f(MAC_MODE, tp->mac_mode);
4129 if (current_link_up == 1) {
4130 tp->link_config.active_speed = SPEED_1000;
4131 tp->link_config.active_duplex = DUPLEX_FULL;
4132 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4133 LED_CTRL_LNKLED_OVERRIDE |
4134 LED_CTRL_1000MBPS_ON));
4136 tp->link_config.active_speed = SPEED_INVALID;
4137 tp->link_config.active_duplex = DUPLEX_INVALID;
4138 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4139 LED_CTRL_LNKLED_OVERRIDE |
4140 LED_CTRL_TRAFFIC_OVERRIDE));
4143 if (current_link_up != netif_carrier_ok(tp->dev)) {
4144 if (current_link_up)
4145 netif_carrier_on(tp->dev);
4147 netif_carrier_off(tp->dev);
4148 tg3_link_report(tp);
4150 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4151 if (orig_pause_cfg != now_pause_cfg ||
4152 orig_active_speed != tp->link_config.active_speed ||
4153 orig_active_duplex != tp->link_config.active_duplex)
4154 tg3_link_report(tp);
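/* Link setup for SERDES devices that are managed through MII registers
 * (e.g. 5714S/5780 class): handles both autoneg and forced 1000 Mb/s
 * modes, plus parallel detection fallback.
 */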
4160 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4162 int current_link_up, err = 0;
4166 u32 local_adv, remote_adv;
4168 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4169 tw32_f(MAC_MODE, tp->mac_mode);
4175 (MAC_STATUS_SYNC_CHANGED |
4176 MAC_STATUS_CFG_CHANGED |
4177 MAC_STATUS_MI_COMPLETION |
4178 MAC_STATUS_LNKSTATE_CHANGED));
4184 current_link_up = 0;
4185 current_speed = SPEED_INVALID;
4186 current_duplex = DUPLEX_INVALID;
4188 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4189 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4190 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4191 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4192 bmsr |= BMSR_LSTATUS;
4194 bmsr &= ~BMSR_LSTATUS;
4197 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4199 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4200 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4201 /* do nothing, just check for link up at the end */
4202 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4205 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4206 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4207 ADVERTISE_1000XPAUSE |
4208 ADVERTISE_1000XPSE_ASYM |
4211 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4213 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4214 new_adv |= ADVERTISE_1000XHALF;
4215 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4216 new_adv |= ADVERTISE_1000XFULL;
4218 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4219 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4220 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4221 tg3_writephy(tp, MII_BMCR, bmcr);
4223 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4224 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4225 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4232 bmcr &= ~BMCR_SPEED1000;
4233 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4235 if (tp->link_config.duplex == DUPLEX_FULL)
4236 new_bmcr |= BMCR_FULLDPLX;
4238 if (new_bmcr != bmcr) {
4239 /* BMCR_SPEED1000 is a reserved bit that needs
 * to be set on write.
 */
4242 new_bmcr |= BMCR_SPEED1000;
4244 /* Force a linkdown */
4245 if (netif_carrier_ok(tp->dev)) {
4248 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4249 adv &= ~(ADVERTISE_1000XFULL |
4250 ADVERTISE_1000XHALF |
4252 tg3_writephy(tp, MII_ADVERTISE, adv);
4253 tg3_writephy(tp, MII_BMCR, bmcr |
4257 netif_carrier_off(tp->dev);
4259 tg3_writephy(tp, MII_BMCR, new_bmcr);
4261 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4262 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4263 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4265 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4266 bmsr |= BMSR_LSTATUS;
4268 bmsr &= ~BMSR_LSTATUS;
4270 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4274 if (bmsr & BMSR_LSTATUS) {
4275 current_speed = SPEED_1000;
4276 current_link_up = 1;
4277 if (bmcr & BMCR_FULLDPLX)
4278 current_duplex = DUPLEX_FULL;
4280 current_duplex = DUPLEX_HALF;
4285 if (bmcr & BMCR_ANENABLE) {
4288 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4289 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4290 common = local_adv & remote_adv;
4291 if (common & (ADVERTISE_1000XHALF |
4292 ADVERTISE_1000XFULL)) {
4293 if (common & ADVERTISE_1000XFULL)
4294 current_duplex = DUPLEX_FULL;
4296 current_duplex = DUPLEX_HALF;
4297 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4298 /* Link is up via parallel detect */
4300 current_link_up = 0;
4305 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4306 tg3_setup_flow_control(tp, local_adv, remote_adv);
4308 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4309 if (tp->link_config.active_duplex == DUPLEX_HALF)
4310 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4312 tw32_f(MAC_MODE, tp->mac_mode);
4315 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4317 tp->link_config.active_speed = current_speed;
4318 tp->link_config.active_duplex = current_duplex;
4320 if (current_link_up != netif_carrier_ok(tp->dev)) {
4321 if (current_link_up)
4322 netif_carrier_on(tp->dev);
4324 netif_carrier_off(tp->dev);
4325 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4327 tg3_link_report(tp);
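/* Called periodically while autonegotiation is outstanding on an MII
 * SERDES port: if we have signal but no config code words, force the
 * link up by parallel detection; if config words reappear, re-enable
 * autoneg.
 */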
4332 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4334 if (tp->serdes_counter) {
4335 /* Give autoneg time to complete. */
4336 tp->serdes_counter--;
4340 if (!netif_carrier_ok(tp->dev) &&
4341 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4344 tg3_readphy(tp, MII_BMCR, &bmcr);
4345 if (bmcr & BMCR_ANENABLE) {
4348 /* Select shadow register 0x1f */
4349 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4350 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4352 /* Select expansion interrupt status register */
4353 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4354 MII_TG3_DSP_EXP1_INT_STAT);
4355 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4356 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4358 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4359 /* We have signal detect and not receiving
 * config code words, link is up by parallel
 * detection.
 */
4364 bmcr &= ~BMCR_ANENABLE;
4365 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4366 tg3_writephy(tp, MII_BMCR, bmcr);
4367 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4370 } else if (netif_carrier_ok(tp->dev) &&
4371 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4372 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4375 /* Select expansion interrupt status register */
4376 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4377 MII_TG3_DSP_EXP1_INT_STAT);
4378 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4382 /* Config code words received, turn on autoneg. */
4383 tg3_readphy(tp, MII_BMCR, &bmcr);
4384 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4386 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
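/* Dispatch link setup to the copper, fiber or MII-SERDES handler, then
 * apply post-link fixups: MAC clock prescaler on 5784_AX, transmit
 * slot-time/IPG, statistics coalescing and the ASPM workaround
 * threshold.
 */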
4392 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4397 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4398 err = tg3_setup_fiber_phy(tp, force_reset);
4399 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4400 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4402 err = tg3_setup_copper_phy(tp, force_reset);
4404 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4407 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4408 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4410 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4415 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4416 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4417 tw32(GRC_MISC_CFG, val);
4420 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4421 (6 << TX_LENGTHS_IPG_SHIFT);
4422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4423 val |= tr32(MAC_TX_LENGTHS) &
4424 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4425 TX_LENGTHS_CNT_DWN_VAL_MSK);
4427 if (tp->link_config.active_speed == SPEED_1000 &&
4428 tp->link_config.active_duplex == DUPLEX_HALF)
4429 tw32(MAC_TX_LENGTHS, val |
4430 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4432 tw32(MAC_TX_LENGTHS, val |
4433 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4435 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4436 if (netif_carrier_ok(tp->dev)) {
4437 tw32(HOSTCC_STAT_COAL_TICKS,
4438 tp->coal.stats_block_coalesce_usecs);
4440 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4444 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4445 val = tr32(PCIE_PWR_MGMT_THRESH);
4446 if (!netif_carrier_ok(tp->dev))
4447 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4450 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4451 tw32(PCIE_PWR_MGMT_THRESH, val);
4457 static inline int tg3_irq_sync(struct tg3 *tp)
4459 return tp->irq_sync;
4462 /* This is called whenever we suspect that the system chipset is re-
4463 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4464 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * (in tg3_restart_hw()).
 */
4468 static void tg3_tx_recover(struct tg3 *tp)
4470 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4471 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4473 netdev_warn(tp->dev,
4474 "The system may be re-ordering memory-mapped I/O "
4475 "cycles to the network device, attempting to recover. "
4476 "Please report the problem to the driver maintainer "
4477 "and include system chipset information.\n");
4479 spin_lock(&tp->lock);
4480 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4481 spin_unlock(&tp->lock);
4484 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
/* Tell compiler to fetch tx indices from memory. */
barrier();
4488 return tnapi->tx_pending -
4489 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4492 /* Tigon3 never reports partial packet sends. So we do not
4493 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4496 static void tg3_tx(struct tg3_napi *tnapi)
4498 struct tg3 *tp = tnapi->tp;
4499 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4500 u32 sw_idx = tnapi->tx_cons;
4501 struct netdev_queue *txq;
4502 int index = tnapi - tp->napi;
4504 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4507 txq = netdev_get_tx_queue(tp->dev, index);
4509 while (sw_idx != hw_idx) {
4510 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4511 struct sk_buff *skb = ri->skb;
4514 if (unlikely(skb == NULL)) {
4519 pci_unmap_single(tp->pdev,
4520 dma_unmap_addr(ri, mapping),
4526 sw_idx = NEXT_TX(sw_idx);
4528 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4529 ri = &tnapi->tx_buffers[sw_idx];
4530 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4533 pci_unmap_page(tp->pdev,
4534 dma_unmap_addr(ri, mapping),
4535 skb_shinfo(skb)->frags[i].size,
4537 sw_idx = NEXT_TX(sw_idx);
4542 if (unlikely(tx_bug)) {
4548 tnapi->tx_cons = sw_idx;
4550 /* Need to make the tx_cons update visible to tg3_start_xmit()
4551 * before checking for netif_queue_stopped(). Without the
4552 * memory barrier, there is a small possibility that tg3_start_xmit()
4553 * will miss it and cause the queue to be stopped forever.
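 *
 * This pairs with the ordering tg3_start_xmit() enforces between
 * netif_tx_stop_queue() and its re-check of tg3_tx_avail(): whichever
 * side runs second is guaranteed to see the other's update, so either
 * the producer sees the freed descriptors or the consumer sees the
 * stopped queue and wakes it here.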
4557 if (unlikely(netif_tx_queue_stopped(txq) &&
4558 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4559 __netif_tx_lock(txq, smp_processor_id());
4560 if (netif_tx_queue_stopped(txq) &&
4561 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4562 netif_tx_wake_queue(txq);
4563 __netif_tx_unlock(txq);
4567 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4572 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4573 map_sz, PCI_DMA_FROMDEVICE);
4574 dev_kfree_skb_any(ri->skb);
4578 /* Returns size of skb allocated or < 0 on error.
4580 * We only need to fill in the address because the other members
4581 * of the RX descriptor are invariant, see tg3_init_rings.
4583 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4584 * posting buffers we only dirty the first cache line of the RX
4585 * descriptor (containing the address). Whereas for the RX status
4586 * buffers the cpu only reads the last cacheline of the RX descriptor
4587 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4589 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4590 u32 opaque_key, u32 dest_idx_unmasked)
4592 struct tg3_rx_buffer_desc *desc;
4593 struct ring_info *map;
4594 struct sk_buff *skb;
4596 int skb_size, dest_idx;
4598 switch (opaque_key) {
4599 case RXD_OPAQUE_RING_STD:
4600 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4601 desc = &tpr->rx_std[dest_idx];
4602 map = &tpr->rx_std_buffers[dest_idx];
4603 skb_size = tp->rx_pkt_map_sz;
4606 case RXD_OPAQUE_RING_JUMBO:
4607 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4608 desc = &tpr->rx_jmb[dest_idx].std;
4609 map = &tpr->rx_jmb_buffers[dest_idx];
4610 skb_size = TG3_RX_JMB_MAP_SZ;
4617 /* Do not overwrite any of the map or rp information
4618 * until we are sure we can commit to a new buffer.
4620 * Callers depend upon this behavior and assume that
4621 * we leave everything unchanged if we fail.
4623 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4627 skb_reserve(skb, tp->rx_offset);
4629 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4630 PCI_DMA_FROMDEVICE);
4631 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4637 dma_unmap_addr_set(map, mapping, mapping);
4639 desc->addr_hi = ((u64)mapping >> 32);
4640 desc->addr_lo = ((u64)mapping & 0xffffffff);
4645 /* We only need to copy over the address because the other
4646 * members of the RX descriptor are invariant. See notes above
4647 * tg3_alloc_rx_skb for full details.
4649 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4650 struct tg3_rx_prodring_set *dpr,
4651 u32 opaque_key, int src_idx,
4652 u32 dest_idx_unmasked)
4654 struct tg3 *tp = tnapi->tp;
4655 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4656 struct ring_info *src_map, *dest_map;
4657 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4660 switch (opaque_key) {
4661 case RXD_OPAQUE_RING_STD:
4662 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4663 dest_desc = &dpr->rx_std[dest_idx];
4664 dest_map = &dpr->rx_std_buffers[dest_idx];
4665 src_desc = &spr->rx_std[src_idx];
4666 src_map = &spr->rx_std_buffers[src_idx];
4669 case RXD_OPAQUE_RING_JUMBO:
4670 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4671 dest_desc = &dpr->rx_jmb[dest_idx].std;
4672 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4673 src_desc = &spr->rx_jmb[src_idx].std;
4674 src_map = &spr->rx_jmb_buffers[src_idx];
4681 dest_map->skb = src_map->skb;
4682 dma_unmap_addr_set(dest_map, mapping,
4683 dma_unmap_addr(src_map, mapping));
4684 dest_desc->addr_hi = src_desc->addr_hi;
4685 dest_desc->addr_lo = src_desc->addr_lo;
4687 /* Ensure that the update to the skb happens after the physical
4688 * addresses have been transferred to the new BD location.
4692 src_map->skb = NULL;
4695 /* The RX ring scheme is composed of multiple rings which post fresh
4696 * buffers to the chip, and one special ring the chip uses to report
4697 * status back to the host.
4699 * The special ring reports the status of received packets to the
4700 * host. The chip does not write into the original descriptor the
4701 * RX buffer was obtained from. The chip simply takes the original
4702 * descriptor as provided by the host, updates the status and length
4703 * field, then writes this into the next status ring entry.
4705 * Each ring the host uses to post buffers to the chip is described
4706 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4707 * it is first placed into the on-chip ram. When the packet's length
4708 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4709 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4710 * which is within the range of the new packet's length is chosen.
4712 * The "separate ring for rx status" scheme may sound queer, but it makes
4713 * sense from a cache coherency perspective. If only the host writes
4714 * to the buffer post rings, and only the chip writes to the rx status
4715 * rings, then cache lines never move beyond shared-modified state.
4716 * If both the host and chip were to write into the same ring, cache line
4717 * eviction could occur since both entities want it in an exclusive state.
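 *
 * In outline, the receive path is therefore:
 *   1) the host fills a producer ring entry (DMA address plus opaque
 *      cookie) and advances that ring's producer mailbox;
 *   2) the chip DMAs an incoming frame into the buffer and writes a
 *      completion entry, carrying the opaque cookie, length and
 *      error/VLAN bits, into the rx return (status) ring;
 *   3) tg3_rx() walks the return ring, replaces or recycles each
 *      buffer, and posts refreshed producer indices back to the chip.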
4719 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4721 struct tg3 *tp = tnapi->tp;
4722 u32 work_mask, rx_std_posted = 0;
4723 u32 std_prod_idx, jmb_prod_idx;
4724 u32 sw_idx = tnapi->rx_rcb_ptr;
4727 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4729 hw_idx = *(tnapi->rx_rcb_prod_idx);
4731 * We need to order the read of hw_idx and the read of
4732 * the opaque cookie.
4737 std_prod_idx = tpr->rx_std_prod_idx;
4738 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4739 while (sw_idx != hw_idx && budget > 0) {
4740 struct ring_info *ri;
4741 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4743 struct sk_buff *skb;
4744 dma_addr_t dma_addr;
4745 u32 opaque_key, desc_idx, *post_ptr;
4747 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4748 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4749 if (opaque_key == RXD_OPAQUE_RING_STD) {
4750 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4751 dma_addr = dma_unmap_addr(ri, mapping);
4753 post_ptr = &std_prod_idx;
4755 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4756 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4757 dma_addr = dma_unmap_addr(ri, mapping);
4759 post_ptr = &jmb_prod_idx;
4761 goto next_pkt_nopost;
4763 work_mask |= opaque_key;
4765 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4766 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4768 tg3_recycle_rx(tnapi, tpr, opaque_key,
4769 desc_idx, *post_ptr);
4771 /* Other statistics are tracked by the card. */
4776 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4779 if (len > TG3_RX_COPY_THRESH(tp)) {
4782 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4787 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4788 PCI_DMA_FROMDEVICE);
4790 /* Ensure that the update to the skb happens
4791 * after the usage of the old DMA mapping.
4799 struct sk_buff *copy_skb;
4801 tg3_recycle_rx(tnapi, tpr, opaque_key,
4802 desc_idx, *post_ptr);
4804 copy_skb = netdev_alloc_skb(tp->dev, len +
4806 if (copy_skb == NULL)
4807 goto drop_it_no_recycle;
4809 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4810 skb_put(copy_skb, len);
4811 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4812 skb_copy_from_linear_data(skb, copy_skb->data, len);
4813 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4815 /* We'll reuse the original ring buffer. */
4819 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4820 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4821 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4822 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4823 skb->ip_summed = CHECKSUM_UNNECESSARY;
4825 skb_checksum_none_assert(skb);
4827 skb->protocol = eth_type_trans(skb, tp->dev);
4829 if (len > (tp->dev->mtu + ETH_HLEN) &&
4830 skb->protocol != htons(ETH_P_8021Q)) {
4832 goto drop_it_no_recycle;
4835 if (desc->type_flags & RXD_FLAG_VLAN &&
4836 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4837 __vlan_hwaccel_put_tag(skb,
4838 desc->err_vlan & RXD_VLAN_MASK);
4840 napi_gro_receive(&tnapi->napi, skb);
4848 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4849 tpr->rx_std_prod_idx = std_prod_idx &
4850 tp->rx_std_ring_mask;
4851 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4852 tpr->rx_std_prod_idx);
4853 work_mask &= ~RXD_OPAQUE_RING_STD;
4858 sw_idx &= tp->rx_ret_ring_mask;
4860 /* Refresh hw_idx to see if there is new work */
4861 if (sw_idx == hw_idx) {
4862 hw_idx = *(tnapi->rx_rcb_prod_idx);
4867 /* ACK the status ring. */
4868 tnapi->rx_rcb_ptr = sw_idx;
4869 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4871 /* Refill RX ring(s). */
4872 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4873 if (work_mask & RXD_OPAQUE_RING_STD) {
4874 tpr->rx_std_prod_idx = std_prod_idx &
4875 tp->rx_std_ring_mask;
4876 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4877 tpr->rx_std_prod_idx);
4879 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4880 tpr->rx_jmb_prod_idx = jmb_prod_idx &
4881 tp->rx_jmb_ring_mask;
4882 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4883 tpr->rx_jmb_prod_idx);
4886 } else if (work_mask) {
4887 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4888 * updated before the producer indices can be updated.
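 *
 * The reader in tg3_rx_prodring_xfer() depends on this: it samples the
 * producer index first and only then inspects the buffer entries, so
 * every entry it copies is fully populated by the time it is seen.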
4892 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
4893 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
4895 if (tnapi != &tp->napi[1])
4896 napi_schedule(&tp->napi[1].napi);
4902 static void tg3_poll_link(struct tg3 *tp)
4904 /* handle link change and other phy events */
4905 if (!(tp->tg3_flags &
4906 (TG3_FLAG_USE_LINKCHG_REG |
4907 TG3_FLAG_POLL_SERDES))) {
4908 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4910 if (sblk->status & SD_STATUS_LINK_CHG) {
4911 sblk->status = SD_STATUS_UPDATED |
4912 (sblk->status & ~SD_STATUS_LINK_CHG);
4913 spin_lock(&tp->lock);
4914 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4916 (MAC_STATUS_SYNC_CHANGED |
4917 MAC_STATUS_CFG_CHANGED |
4918 MAC_STATUS_MI_COMPLETION |
4919 MAC_STATUS_LNKSTATE_CHANGED));
4922 tg3_setup_phy(tp, 0);
4923 spin_unlock(&tp->lock);
4928 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4929 struct tg3_rx_prodring_set *dpr,
4930 struct tg3_rx_prodring_set *spr)
4932 u32 si, di, cpycnt, src_prod_idx;
4936 src_prod_idx = spr->rx_std_prod_idx;
4938 /* Make sure updates to the rx_std_buffers[] entries and the
4939 * standard producer index are seen in the correct order.
4943 if (spr->rx_std_cons_idx == src_prod_idx)
4946 if (spr->rx_std_cons_idx < src_prod_idx)
4947 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4949 cpycnt = tp->rx_std_ring_mask + 1 -
4950 spr->rx_std_cons_idx;
4952 cpycnt = min(cpycnt,
4953 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
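/* Example (illustrative indices): with rx_std_ring_mask = 511, a source
 * consumer of 510 and a source producer of 5, the source ring has
 * wrapped, so this pass copies 512 - 510 = 2 entries (slots 510 and
 * 511); the remaining entries are picked up on a later pass. cpycnt is
 * also clamped by the room left before the destination producer index
 * wraps.
 */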
4955 si = spr->rx_std_cons_idx;
4956 di = dpr->rx_std_prod_idx;
4958 for (i = di; i < di + cpycnt; i++) {
4959 if (dpr->rx_std_buffers[i].skb) {
4969 /* Ensure that updates to the rx_std_buffers ring and the
4970 * shadowed hardware producer ring from tg3_recycle_skb() are
4971 * ordered correctly WRT the skb check above.
4975 memcpy(&dpr->rx_std_buffers[di],
4976 &spr->rx_std_buffers[si],
4977 cpycnt * sizeof(struct ring_info));
4979 for (i = 0; i < cpycnt; i++, di++, si++) {
4980 struct tg3_rx_buffer_desc *sbd, *dbd;
4981 sbd = &spr->rx_std[si];
4982 dbd = &dpr->rx_std[di];
4983 dbd->addr_hi = sbd->addr_hi;
4984 dbd->addr_lo = sbd->addr_lo;
4987 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
4988 tp->rx_std_ring_mask;
4989 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
4990 tp->rx_std_ring_mask;
4994 src_prod_idx = spr->rx_jmb_prod_idx;
4996 /* Make sure updates to the rx_jmb_buffers[] entries and
4997 * the jumbo producer index are seen in the correct order.
5001 if (spr->rx_jmb_cons_idx == src_prod_idx)
5004 if (spr->rx_jmb_cons_idx < src_prod_idx)
5005 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5007 cpycnt = tp->rx_jmb_ring_mask + 1 -
5008 spr->rx_jmb_cons_idx;
5010 cpycnt = min(cpycnt,
5011 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5013 si = spr->rx_jmb_cons_idx;
5014 di = dpr->rx_jmb_prod_idx;
5016 for (i = di; i < di + cpycnt; i++) {
5017 if (dpr->rx_jmb_buffers[i].skb) {
5027 /* Ensure that updates to the rx_jmb_buffers ring and the
5028 * shadowed hardware producer ring from tg3_recycle_skb() are
5029 * ordered correctly WRT the skb check above.
5033 memcpy(&dpr->rx_jmb_buffers[di],
5034 &spr->rx_jmb_buffers[si],
5035 cpycnt * sizeof(struct ring_info));
5037 for (i = 0; i < cpycnt; i++, di++, si++) {
5038 struct tg3_rx_buffer_desc *sbd, *dbd;
5039 sbd = &spr->rx_jmb[si].std;
5040 dbd = &dpr->rx_jmb[di].std;
5041 dbd->addr_hi = sbd->addr_hi;
5042 dbd->addr_lo = sbd->addr_lo;
5045 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5046 tp->rx_jmb_ring_mask;
5047 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5048 tp->rx_jmb_ring_mask;
5054 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5056 struct tg3 *tp = tnapi->tp;
5058 /* run TX completion thread */
5059 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5061 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5065 /* run RX thread, within the bounds set by NAPI.
5066 * All RX "locking" is done by ensuring outside
5067 * code synchronizes with tg3->napi.poll()
5069 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5070 work_done += tg3_rx(tnapi, budget - work_done);
5072 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5073 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5075 u32 std_prod_idx = dpr->rx_std_prod_idx;
5076 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5078 for (i = 1; i < tp->irq_cnt; i++)
5079 err |= tg3_rx_prodring_xfer(tp, dpr,
5080 &tp->napi[i].prodring);
5084 if (std_prod_idx != dpr->rx_std_prod_idx)
5085 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5086 dpr->rx_std_prod_idx);
5088 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5089 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5090 dpr->rx_jmb_prod_idx);
5095 tw32_f(HOSTCC_MODE, tp->coal_now);
5101 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5103 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5104 struct tg3 *tp = tnapi->tp;
5106 struct tg3_hw_status *sblk = tnapi->hw_status;
5109 work_done = tg3_poll_work(tnapi, work_done, budget);
5111 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5114 if (unlikely(work_done >= budget))
5117 /* tp->last_tag is used in tg3_int_reenable() below
5118 * to tell the hw how much work has been processed,
5119 * so we must read it before checking for more work.
5121 tnapi->last_tag = sblk->status_tag;
5122 tnapi->last_irq_tag = tnapi->last_tag;
5125 /* check for RX/TX work to do */
5126 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5127 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5128 napi_complete(napi);
5129 /* Reenable interrupts. */
5130 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5139 /* work_done is guaranteed to be less than budget. */
5140 napi_complete(napi);
5141 schedule_work(&tp->reset_task);
5145 static int tg3_poll(struct napi_struct *napi, int budget)
5147 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5148 struct tg3 *tp = tnapi->tp;
5150 struct tg3_hw_status *sblk = tnapi->hw_status;
5155 work_done = tg3_poll_work(tnapi, work_done, budget);
5157 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5160 if (unlikely(work_done >= budget))
5163 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5164 /* tp->last_tag is used in tg3_int_reenable() below
5165 * to tell the hw how much work has been processed,
5166 * so we must read it before checking for more work.
5168 tnapi->last_tag = sblk->status_tag;
5169 tnapi->last_irq_tag = tnapi->last_tag;
5172 sblk->status &= ~SD_STATUS_UPDATED;
5174 if (likely(!tg3_has_work(tnapi))) {
5175 napi_complete(napi);
5176 tg3_int_reenable(tnapi);
5184 /* work_done is guaranteed to be less than budget. */
5185 napi_complete(napi);
5186 schedule_work(&tp->reset_task);
5190 static void tg3_napi_disable(struct tg3 *tp)
5194 for (i = tp->irq_cnt - 1; i >= 0; i--)
5195 napi_disable(&tp->napi[i].napi);
5198 static void tg3_napi_enable(struct tg3 *tp)
5202 for (i = 0; i < tp->irq_cnt; i++)
5203 napi_enable(&tp->napi[i].napi);
5206 static void tg3_napi_init(struct tg3 *tp)
5210 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5211 for (i = 1; i < tp->irq_cnt; i++)
5212 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5215 static void tg3_napi_fini(struct tg3 *tp)
5219 for (i = 0; i < tp->irq_cnt; i++)
5220 netif_napi_del(&tp->napi[i].napi);
5223 static inline void tg3_netif_stop(struct tg3 *tp)
5225 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5226 tg3_napi_disable(tp);
5227 netif_tx_disable(tp->dev);
5230 static inline void tg3_netif_start(struct tg3 *tp)
5232 /* NOTE: unconditional netif_tx_wake_all_queues is only
5233 * appropriate so long as all callers are assured to
5234 * have free tx slots (such as after tg3_init_hw)
5236 netif_tx_wake_all_queues(tp->dev);
5238 tg3_napi_enable(tp);
5239 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5240 tg3_enable_ints(tp);
5243 static void tg3_irq_quiesce(struct tg3 *tp)
5247 BUG_ON(tp->irq_sync);
5252 for (i = 0; i < tp->irq_cnt; i++)
5253 synchronize_irq(tp->napi[i].irq_vec);
5256 /* Fully shut down all tg3 driver activity elsewhere in the system.
5257 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5258 * with as well. Most of the time, this is not necessary except when
5259 * shutting down the device.
5261 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5263 spin_lock_bh(&tp->lock);
5265 tg3_irq_quiesce(tp);
5268 static inline void tg3_full_unlock(struct tg3 *tp)
5270 spin_unlock_bh(&tp->lock);
5273 /* One-shot MSI handler - Chip automatically disables interrupt
5274 * after sending MSI so driver doesn't have to do it.
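 *
 * That is why, unlike tg3_msi() below, no interrupt mailbox write is
 * needed here before handing the work to NAPI.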
5276 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5278 struct tg3_napi *tnapi = dev_id;
5279 struct tg3 *tp = tnapi->tp;
5281 prefetch(tnapi->hw_status);
5283 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5285 if (likely(!tg3_irq_sync(tp)))
5286 napi_schedule(&tnapi->napi);
5291 /* MSI ISR - No need to check for interrupt sharing and no need to
5292 * flush status block and interrupt mailbox. PCI ordering rules
5293 * guarantee that MSI will arrive after the status block.
5295 static irqreturn_t tg3_msi(int irq, void *dev_id)
5297 struct tg3_napi *tnapi = dev_id;
5298 struct tg3 *tp = tnapi->tp;
5300 prefetch(tnapi->hw_status);
5302 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5304 * Writing any value to intr-mbox-0 clears PCI INTA# and
5305 * chip-internal interrupt pending events.
5306 * Writing non-zero to intr-mbox-0 additionally tells the
5307 * NIC to stop sending us irqs, engaging "in-intr-handler"
5310 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5311 if (likely(!tg3_irq_sync(tp)))
5312 napi_schedule(&tnapi->napi);
5314 return IRQ_RETVAL(1);
5317 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5319 struct tg3_napi *tnapi = dev_id;
5320 struct tg3 *tp = tnapi->tp;
5321 struct tg3_hw_status *sblk = tnapi->hw_status;
5322 unsigned int handled = 1;
5324 /* In INTx mode, it is possible for the interrupt to arrive at
5325 * the CPU before the status block posted prior to the interrupt.
5326 * Reading the PCI State register will confirm whether the
5327 * interrupt is ours and will flush the status block.
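 *
 * The read acts as a flush because PCI ordering requires the device's
 * earlier posted writes (including the status block DMA) to complete
 * before the read completion is returned to the CPU.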
5329 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5330 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5331 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5338 * Writing any value to intr-mbox-0 clears PCI INTA# and
5339 * chip-internal interrupt pending events.
5340 * Writing non-zero to intr-mbox-0 additionally tells the
5341 * NIC to stop sending us irqs, engaging "in-intr-handler"
5344 * Flush the mailbox to de-assert the IRQ immediately to prevent
5345 * spurious interrupts. The flush impacts performance but
5346 * excessive spurious interrupts can be worse in some cases.
5348 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5349 if (tg3_irq_sync(tp))
5351 sblk->status &= ~SD_STATUS_UPDATED;
5352 if (likely(tg3_has_work(tnapi))) {
5353 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5354 napi_schedule(&tnapi->napi);
5356 /* No work, shared interrupt perhaps? re-enable
5357 * interrupts, and flush that PCI write
5359 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5363 return IRQ_RETVAL(handled);
5366 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5368 struct tg3_napi *tnapi = dev_id;
5369 struct tg3 *tp = tnapi->tp;
5370 struct tg3_hw_status *sblk = tnapi->hw_status;
5371 unsigned int handled = 1;
5373 /* In INTx mode, it is possible for the interrupt to arrive at
5374 * the CPU before the status block posted prior to the interrupt.
5375 * Reading the PCI State register will confirm whether the
5376 * interrupt is ours and will flush the status block.
5378 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5379 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5380 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5387 * writing any value to intr-mbox-0 clears PCI INTA# and
5388 * chip-internal interrupt pending events.
5389 * writing non-zero to intr-mbox-0 additionally tells the
5390 * NIC to stop sending us irqs, engaging "in-intr-handler"
5393 * Flush the mailbox to de-assert the IRQ immediately to prevent
5394 * spurious interrupts. The flush impacts performance but
5395 * excessive spurious interrupts can be worse in some cases.
5397 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5400 * In a shared interrupt configuration, sometimes other devices'
5401 * interrupts will scream. We record the current status tag here
5402 * so that the above check can report that the screaming interrupts
5403 * are unhandled. Eventually they will be silenced.
5405 tnapi->last_irq_tag = sblk->status_tag;
5407 if (tg3_irq_sync(tp))
5410 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5412 napi_schedule(&tnapi->napi);
5415 return IRQ_RETVAL(handled);
5418 /* ISR for interrupt test */
5419 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5421 struct tg3_napi *tnapi = dev_id;
5422 struct tg3 *tp = tnapi->tp;
5423 struct tg3_hw_status *sblk = tnapi->hw_status;
5425 if ((sblk->status & SD_STATUS_UPDATED) ||
5426 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5427 tg3_disable_ints(tp);
5428 return IRQ_RETVAL(1);
5430 return IRQ_RETVAL(0);
5433 static int tg3_init_hw(struct tg3 *, int);
5434 static int tg3_halt(struct tg3 *, int, int);
5436 /* Restart hardware after configuration changes, self-test, etc.
5437 * Invoked with tp->lock held.
5439 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5440 __releases(tp->lock)
5441 __acquires(tp->lock)
5445 err = tg3_init_hw(tp, reset_phy);
5448 "Failed to re-initialize device, aborting\n");
5449 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5450 tg3_full_unlock(tp);
5451 del_timer_sync(&tp->timer);
5453 tg3_napi_enable(tp);
5455 tg3_full_lock(tp, 0);
5460 #ifdef CONFIG_NET_POLL_CONTROLLER
5461 static void tg3_poll_controller(struct net_device *dev)
5464 struct tg3 *tp = netdev_priv(dev);
5466 for (i = 0; i < tp->irq_cnt; i++)
5467 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5471 static void tg3_reset_task(struct work_struct *work)
5473 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5475 unsigned int restart_timer;
5477 tg3_full_lock(tp, 0);
5479 if (!netif_running(tp->dev)) {
5480 tg3_full_unlock(tp);
5484 tg3_full_unlock(tp);
5490 tg3_full_lock(tp, 1);
5492 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5493 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5495 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5496 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5497 tp->write32_rx_mbox = tg3_write_flush_reg32;
5498 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5499 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5502 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5503 err = tg3_init_hw(tp, 1);
5507 tg3_netif_start(tp);
5510 mod_timer(&tp->timer, jiffies + 1);
5513 tg3_full_unlock(tp);
5519 static void tg3_dump_short_state(struct tg3 *tp)
5521 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5522 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5523 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5524 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5527 static void tg3_tx_timeout(struct net_device *dev)
5529 struct tg3 *tp = netdev_priv(dev);
5531 if (netif_msg_tx_err(tp)) {
5532 netdev_err(dev, "transmit timed out, resetting\n");
5533 tg3_dump_short_state(tp);
5536 schedule_work(&tp->reset_task);
5539 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5540 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5542 u32 base = (u32) mapping & 0xffffffff;
5544 return (base > 0xffffdcc0) && (base + len + 8 < base);
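/* Example (illustrative addresses): base = 0xffffff00 with len = 1500
 * gives base + len + 8 = 0x1000004e4, which truncates to 0x4e4 < base
 * in 32-bit arithmetic, so the buffer would straddle a 4GB boundary and
 * the test returns true. The 0xffffdcc0 pre-check merely skips buffers
 * that start too far below a boundary to be able to cross it.
 */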
5547 /* Test for DMA addresses > 40-bit */
5548 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5551 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5552 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5553 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5560 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5562 /* Work around 4GB and 40-bit hardware DMA bugs. */
5563 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5564 struct sk_buff *skb, u32 last_plus_one,
5565 u32 *start, u32 base_flags, u32 mss)
5567 struct tg3 *tp = tnapi->tp;
5568 struct sk_buff *new_skb;
5569 dma_addr_t new_addr = 0;
5573 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5574 new_skb = skb_copy(skb, GFP_ATOMIC);
5576 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5578 new_skb = skb_copy_expand(skb,
5579 skb_headroom(skb) + more_headroom,
5580 skb_tailroom(skb), GFP_ATOMIC);
5586 /* New SKB is guaranteed to be linear. */
5588 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5590 /* Make sure the mapping succeeded */
5591 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5593 dev_kfree_skb(new_skb);
5596 /* Make sure new skb does not cross any 4G boundaries.
5597 * Drop the packet if it does.
5599 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5600 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5601 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5604 dev_kfree_skb(new_skb);
5607 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5608 base_flags, 1 | (mss << 1));
5609 *start = NEXT_TX(entry);
5613 /* Now clean up the sw ring entries. */
5615 while (entry != last_plus_one) {
5619 len = skb_headlen(skb);
5621 len = skb_shinfo(skb)->frags[i-1].size;
5623 pci_unmap_single(tp->pdev,
5624 dma_unmap_addr(&tnapi->tx_buffers[entry],
5626 len, PCI_DMA_TODEVICE);
5628 tnapi->tx_buffers[entry].skb = new_skb;
5629 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5632 tnapi->tx_buffers[entry].skb = NULL;
5634 entry = NEXT_TX(entry);
5643 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5644 dma_addr_t mapping, int len, u32 flags,
5647 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5648 int is_end = (mss_and_is_end & 0x1);
5649 u32 mss = (mss_and_is_end >> 1);
5653 flags |= TXD_FLAG_END;
5654 if (flags & TXD_FLAG_VLAN) {
5655 vlan_tag = flags >> 16;
5658 vlan_tag |= (mss << TXD_MSS_SHIFT);
5660 txd->addr_hi = ((u64) mapping >> 32);
5661 txd->addr_lo = ((u64) mapping & 0xffffffff);
5662 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5663 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
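/* Callers pack mss_and_is_end as (mss << 1) | is_end: tg3_start_xmit()
 * marks the head descriptor as the end only when the skb carries no
 * fragments, and marks fragment i as the end only when i == last, so
 * exactly one descriptor per packet ends up with TXD_FLAG_END set.
 */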
5666 /* hard_start_xmit for devices that don't have any bugs and
5667 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5669 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5670 struct net_device *dev)
5672 struct tg3 *tp = netdev_priv(dev);
5673 u32 len, entry, base_flags, mss;
5675 struct tg3_napi *tnapi;
5676 struct netdev_queue *txq;
5677 unsigned int i, last;
5679 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5680 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5681 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5684 /* We are running in BH disabled context with netif_tx_lock
5685 * and TX reclaim runs via tp->napi.poll inside of a software
5686 * interrupt. Furthermore, IRQ processing runs lockless so we have
5687 * no IRQ context deadlocks to worry about either. Rejoice!
5689 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5690 if (!netif_tx_queue_stopped(txq)) {
5691 netif_tx_stop_queue(txq);
5693 /* This is a hard error, log it. */
5695 "BUG! Tx Ring full when queue awake!\n");
5697 return NETDEV_TX_BUSY;
5700 entry = tnapi->tx_prod;
5702 mss = skb_shinfo(skb)->gso_size;
5704 int tcp_opt_len, ip_tcp_len;
5707 if (skb_header_cloned(skb) &&
5708 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5713 if (skb_is_gso_v6(skb)) {
5714 hdrlen = skb_headlen(skb) - ETH_HLEN;
5716 struct iphdr *iph = ip_hdr(skb);
5718 tcp_opt_len = tcp_optlen(skb);
5719 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5722 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5723 hdrlen = ip_tcp_len + tcp_opt_len;
5726 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5727 mss |= (hdrlen & 0xc) << 12;
5729 base_flags |= 0x00000010;
5730 base_flags |= (hdrlen & 0x3e0) << 5;
5734 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5735 TXD_FLAG_CPU_POST_DMA);
5737 tcp_hdr(skb)->check = 0;
5739 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5740 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5743 if (vlan_tx_tag_present(skb))
5744 base_flags |= (TXD_FLAG_VLAN |
5745 (vlan_tx_tag_get(skb) << 16));
5747 len = skb_headlen(skb);
5749 /* Queue skb data, a.k.a. the main skb fragment. */
5750 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5751 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5756 tnapi->tx_buffers[entry].skb = skb;
5757 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5759 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5760 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5761 base_flags |= TXD_FLAG_JMB_PKT;
5763 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5764 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5766 entry = NEXT_TX(entry);
5768 /* Now loop through additional data fragments, and queue them. */
5769 if (skb_shinfo(skb)->nr_frags > 0) {
5770 last = skb_shinfo(skb)->nr_frags - 1;
5771 for (i = 0; i <= last; i++) {
5772 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5775 mapping = pci_map_page(tp->pdev,
5778 len, PCI_DMA_TODEVICE);
5779 if (pci_dma_mapping_error(tp->pdev, mapping))
5782 tnapi->tx_buffers[entry].skb = NULL;
5783 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5786 tg3_set_txd(tnapi, entry, mapping, len,
5787 base_flags, (i == last) | (mss << 1));
5789 entry = NEXT_TX(entry);
5793 /* Packets are ready, update Tx producer idx locally and on the card. */
5794 tw32_tx_mbox(tnapi->prodmbox, entry);
5796 tnapi->tx_prod = entry;
5797 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5798 netif_tx_stop_queue(txq);
5800 /* netif_tx_stop_queue() must be done before checking
5801 * tx index in tg3_tx_avail() below, because in
5802 * tg3_tx(), we update tx index before checking for
5803 * netif_tx_queue_stopped().
5806 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5807 netif_tx_wake_queue(txq);
5813 return NETDEV_TX_OK;
5817 entry = tnapi->tx_prod;
5818 tnapi->tx_buffers[entry].skb = NULL;
5819 pci_unmap_single(tp->pdev,
5820 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5823 for (i = 0; i <= last; i++) {
5824 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5825 entry = NEXT_TX(entry);
5827 pci_unmap_page(tp->pdev,
5828 dma_unmap_addr(&tnapi->tx_buffers[entry],
5830 frag->size, PCI_DMA_TODEVICE);
5834 return NETDEV_TX_OK;
5837 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5838 struct net_device *);
5840 /* Use GSO to work around a rare TSO bug that may be triggered when the
5841 * TSO header is greater than 80 bytes.
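 *
 * The skb is resegmented in software with skb_gso_segment() (TSO masked
 * out of the feature flags) and each resulting segment is then queued
 * through tg3_start_xmit_dma_bug() as an ordinary non-TSO frame.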
5843 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5845 struct sk_buff *segs, *nskb;
5846 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5848 /* Estimate the number of fragments in the worst case */
5849 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5850 netif_stop_queue(tp->dev);
5852 /* netif_tx_stop_queue() must be done before checking
5853 * tx index in tg3_tx_avail() below, because in
5854 * tg3_tx(), we update tx index before checking for
5855 * netif_tx_queue_stopped().
5858 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5859 return NETDEV_TX_BUSY;
5861 netif_wake_queue(tp->dev);
5864 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5866 goto tg3_tso_bug_end;
5872 tg3_start_xmit_dma_bug(nskb, tp->dev);
5878 return NETDEV_TX_OK;
5881 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5882 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5884 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5885 struct net_device *dev)
5887 struct tg3 *tp = netdev_priv(dev);
5888 u32 len, entry, base_flags, mss;
5889 int would_hit_hwbug;
5891 struct tg3_napi *tnapi;
5892 struct netdev_queue *txq;
5893 unsigned int i, last;
5895 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5896 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5897 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5900 /* We are running in BH disabled context with netif_tx_lock
5901 * and TX reclaim runs via tp->napi.poll inside of a software
5902 * interrupt. Furthermore, IRQ processing runs lockless so we have
5903 * no IRQ context deadlocks to worry about either. Rejoice!
5905 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5906 if (!netif_tx_queue_stopped(txq)) {
5907 netif_tx_stop_queue(txq);
5909 /* This is a hard error, log it. */
5911 "BUG! Tx Ring full when queue awake!\n");
5913 return NETDEV_TX_BUSY;
5916 entry = tnapi->tx_prod;
5918 if (skb->ip_summed == CHECKSUM_PARTIAL)
5919 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5921 mss = skb_shinfo(skb)->gso_size;
5924 u32 tcp_opt_len, hdr_len;
5926 if (skb_header_cloned(skb) &&
5927 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5933 tcp_opt_len = tcp_optlen(skb);
5935 if (skb_is_gso_v6(skb)) {
5936 hdr_len = skb_headlen(skb) - ETH_HLEN;
5940 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5941 hdr_len = ip_tcp_len + tcp_opt_len;
5944 iph->tot_len = htons(mss + hdr_len);
5947 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5948 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5949 return tg3_tso_bug(tp, skb);
5951 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5952 TXD_FLAG_CPU_POST_DMA);
5954 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5955 tcp_hdr(skb)->check = 0;
5956 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5958 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5963 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5964 mss |= (hdr_len & 0xc) << 12;
5966 base_flags |= 0x00000010;
5967 base_flags |= (hdr_len & 0x3e0) << 5;
5968 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5969 mss |= hdr_len << 9;
5970 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5972 if (tcp_opt_len || iph->ihl > 5) {
5975 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5976 mss |= (tsflags << 11);
5979 if (tcp_opt_len || iph->ihl > 5) {
5982 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5983 base_flags |= tsflags << 12;
5988 if (vlan_tx_tag_present(skb))
5989 base_flags |= (TXD_FLAG_VLAN |
5990 (vlan_tx_tag_get(skb) << 16));
5992 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5993 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5994 base_flags |= TXD_FLAG_JMB_PKT;
5996 len = skb_headlen(skb);
5998 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5999 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6004 tnapi->tx_buffers[entry].skb = skb;
6005 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6007 would_hit_hwbug = 0;
6009 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6010 would_hit_hwbug = 1;
6012 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6013 tg3_4g_overflow_test(mapping, len))
6014 would_hit_hwbug = 1;
6016 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6017 tg3_40bit_overflow_test(tp, mapping, len))
6018 would_hit_hwbug = 1;
6020 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6021 would_hit_hwbug = 1;
6023 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6024 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6026 entry = NEXT_TX(entry);
6028 /* Now loop through additional data fragments, and queue them. */
6029 if (skb_shinfo(skb)->nr_frags > 0) {
6030 last = skb_shinfo(skb)->nr_frags - 1;
6031 for (i = 0; i <= last; i++) {
6032 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6035 mapping = pci_map_page(tp->pdev,
6038 len, PCI_DMA_TODEVICE);
6040 tnapi->tx_buffers[entry].skb = NULL;
6041 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6043 if (pci_dma_mapping_error(tp->pdev, mapping))
6046 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6048 would_hit_hwbug = 1;
6050 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6051 tg3_4g_overflow_test(mapping, len))
6052 would_hit_hwbug = 1;
6054 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6055 tg3_40bit_overflow_test(tp, mapping, len))
6056 would_hit_hwbug = 1;
6058 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6059 tg3_set_txd(tnapi, entry, mapping, len,
6060 base_flags, (i == last)|(mss << 1));
6062 tg3_set_txd(tnapi, entry, mapping, len,
6063 base_flags, (i == last));
6065 entry = NEXT_TX(entry);
6069 if (would_hit_hwbug) {
6070 u32 last_plus_one = entry;
6073 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6074 start &= (TG3_TX_RING_SIZE - 1);
6076 /* If the workaround fails due to memory/mapping
6077 * failure, silently drop this packet.
6079 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6080 &start, base_flags, mss))
6086 /* Packets are ready, update Tx producer idx locally and on the card. */
6087 tw32_tx_mbox(tnapi->prodmbox, entry);
6089 tnapi->tx_prod = entry;
6090 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6091 netif_tx_stop_queue(txq);
6093 /* netif_tx_stop_queue() must be done before checking
6094 * tx index in tg3_tx_avail() below, because in
6095 * tg3_tx(), we update tx index before checking for
6096 * netif_tx_queue_stopped().
6099 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6100 netif_tx_wake_queue(txq);
6106 return NETDEV_TX_OK;
6110 entry = tnapi->tx_prod;
6111 tnapi->tx_buffers[entry].skb = NULL;
6112 pci_unmap_single(tp->pdev,
6113 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6116 for (i = 0; i <= last; i++) {
6117 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6118 entry = NEXT_TX(entry);
6120 pci_unmap_page(tp->pdev,
6121 dma_unmap_addr(&tnapi->tx_buffers[entry],
6123 frag->size, PCI_DMA_TODEVICE);
6127 return NETDEV_TX_OK;
6130 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6135 if (new_mtu > ETH_DATA_LEN) {
6136 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6137 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6138 ethtool_op_set_tso(dev, 0);
6140 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6143 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6144 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6145 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6149 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6151 struct tg3 *tp = netdev_priv(dev);
6154 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6157 if (!netif_running(dev)) {
6158 /* We'll just catch it later when the
6161 tg3_set_mtu(dev, tp, new_mtu);
6169 tg3_full_lock(tp, 1);
6171 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6173 tg3_set_mtu(dev, tp, new_mtu);
6175 err = tg3_restart_hw(tp, 0);
6178 tg3_netif_start(tp);
6180 tg3_full_unlock(tp);
6188 static void tg3_rx_prodring_free(struct tg3 *tp,
6189 struct tg3_rx_prodring_set *tpr)
6193 if (tpr != &tp->napi[0].prodring) {
6194 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6195 i = (i + 1) & tp->rx_std_ring_mask)
6196 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6199 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6200 for (i = tpr->rx_jmb_cons_idx;
6201 i != tpr->rx_jmb_prod_idx;
6202 i = (i + 1) & tp->rx_jmb_ring_mask) {
6203 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6211 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6212 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6215 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6216 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6217 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6218 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6223 /* Initialize rx rings for packet processing.
6225 * The chip has been shut down and the driver detached from
6226 * the networking, so no interrupts or new tx packets will
6227 * end up in the driver. tp->{tx,}lock are held and thus
6230 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6231 struct tg3_rx_prodring_set *tpr)
6233 u32 i, rx_pkt_dma_sz;
6235 tpr->rx_std_cons_idx = 0;
6236 tpr->rx_std_prod_idx = 0;
6237 tpr->rx_jmb_cons_idx = 0;
6238 tpr->rx_jmb_prod_idx = 0;
6240 if (tpr != &tp->napi[0].prodring) {
6241 memset(&tpr->rx_std_buffers[0], 0,
6242 TG3_RX_STD_BUFF_RING_SIZE(tp));
6243 if (tpr->rx_jmb_buffers)
6244 memset(&tpr->rx_jmb_buffers[0], 0,
6245 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6249 /* Zero out all descriptors. */
6250 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6252 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6253 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6254 tp->dev->mtu > ETH_DATA_LEN)
6255 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6256 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6258 /* Initialize the invariants of the rings; we only set this
6259 * stuff once. This works because the card does not
6260 * write into the rx buffer posting rings.
6262 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6263 struct tg3_rx_buffer_desc *rxd;
6265 rxd = &tpr->rx_std[i];
6266 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6267 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6268 rxd->opaque = (RXD_OPAQUE_RING_STD |
6269 (i << RXD_OPAQUE_INDEX_SHIFT));
6272 /* Now allocate fresh SKBs for each rx ring. */
6273 for (i = 0; i < tp->rx_pending; i++) {
6274 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6275 netdev_warn(tp->dev,
6276 "Using a smaller RX standard ring. Only "
6277 "%d out of %d buffers were allocated "
6278 "successfully\n", i, tp->rx_pending);
6286 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6287 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6290 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6292 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6295 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6296 struct tg3_rx_buffer_desc *rxd;
6298 rxd = &tpr->rx_jmb[i].std;
6299 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6300 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6302 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6303 (i << RXD_OPAQUE_INDEX_SHIFT));
6306 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6307 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6308 netdev_warn(tp->dev,
6309 "Using a smaller RX jumbo ring. Only %d "
6310 "out of %d buffers were allocated "
6311 "successfully\n", i, tp->rx_jumbo_pending);
6314 tp->rx_jumbo_pending = i;
6323 tg3_rx_prodring_free(tp, tpr);
6327 static void tg3_rx_prodring_fini(struct tg3 *tp,
6328 struct tg3_rx_prodring_set *tpr)
6330 kfree(tpr->rx_std_buffers);
6331 tpr->rx_std_buffers = NULL;
6332 kfree(tpr->rx_jmb_buffers);
6333 tpr->rx_jmb_buffers = NULL;
6335 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6336 tpr->rx_std, tpr->rx_std_mapping);
6340 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6341 tpr->rx_jmb, tpr->rx_jmb_mapping);
6346 static int tg3_rx_prodring_init(struct tg3 *tp,
6347 struct tg3_rx_prodring_set *tpr)
6349 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6351 if (!tpr->rx_std_buffers)
6354 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6355 TG3_RX_STD_RING_BYTES(tp),
6356 &tpr->rx_std_mapping,
6361 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6362 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6363 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6365 if (!tpr->rx_jmb_buffers)
6368 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6369 TG3_RX_JMB_RING_BYTES(tp),
6370 &tpr->rx_jmb_mapping,
6379 tg3_rx_prodring_fini(tp, tpr);
6383 /* Free up pending packets in all rx/tx rings.
6385 * The chip has been shut down and the driver detached from
6386 * the networking, so no interrupts or new tx packets will
6387 * end up in the driver. tp->{tx,}lock is not held and we are not
6388 * in an interrupt context and thus may sleep.
6390 static void tg3_free_rings(struct tg3 *tp)
6394 for (j = 0; j < tp->irq_cnt; j++) {
6395 struct tg3_napi *tnapi = &tp->napi[j];
6397 tg3_rx_prodring_free(tp, &tnapi->prodring);
6399 if (!tnapi->tx_buffers)
6402 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6403 struct ring_info *txp;
6404 struct sk_buff *skb;
6407 txp = &tnapi->tx_buffers[i];
6415 pci_unmap_single(tp->pdev,
6416 dma_unmap_addr(txp, mapping),
6423 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6424 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6425 pci_unmap_page(tp->pdev,
6426 dma_unmap_addr(txp, mapping),
6427 skb_shinfo(skb)->frags[k].size,
6432 dev_kfree_skb_any(skb);
6437 /* Initialize tx/rx rings for packet processing.
6439 * The chip has been shut down and the driver detached from
6440 * the networking, so no interrupts or new tx packets will
6441 * end up in the driver. tp->{tx,}lock are held and thus
6444 static int tg3_init_rings(struct tg3 *tp)
6448 /* Free up all the SKBs. */
6451 for (i = 0; i < tp->irq_cnt; i++) {
6452 struct tg3_napi *tnapi = &tp->napi[i];
6454 tnapi->last_tag = 0;
6455 tnapi->last_irq_tag = 0;
6456 tnapi->hw_status->status = 0;
6457 tnapi->hw_status->status_tag = 0;
6458 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6463 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6465 tnapi->rx_rcb_ptr = 0;
6467 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6469 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6479 * Must not be invoked with interrupt sources disabled and
6480 * the hardware shut down.
6482 static void tg3_free_consistent(struct tg3 *tp)
6486 for (i = 0; i < tp->irq_cnt; i++) {
6487 struct tg3_napi *tnapi = &tp->napi[i];
6489 if (tnapi->tx_ring) {
6490 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6491 tnapi->tx_ring, tnapi->tx_desc_mapping);
6492 tnapi->tx_ring = NULL;
6495 kfree(tnapi->tx_buffers);
6496 tnapi->tx_buffers = NULL;
6498 if (tnapi->rx_rcb) {
6499 dma_free_coherent(&tp->pdev->dev,
6500 TG3_RX_RCB_RING_BYTES(tp),
6502 tnapi->rx_rcb_mapping);
6503 tnapi->rx_rcb = NULL;
6506 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6508 if (tnapi->hw_status) {
6509 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6511 tnapi->status_mapping);
6512 tnapi->hw_status = NULL;
6517 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6518 tp->hw_stats, tp->stats_mapping);
6519 tp->hw_stats = NULL;
6524 * Must not be invoked with interrupt sources disabled and
6525 * the hardware shut down. Can sleep.
6527 static int tg3_alloc_consistent(struct tg3 *tp)
6531 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6532 sizeof(struct tg3_hw_stats),
6538 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6540 for (i = 0; i < tp->irq_cnt; i++) {
6541 struct tg3_napi *tnapi = &tp->napi[i];
6542 struct tg3_hw_status *sblk;
6544 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6546 &tnapi->status_mapping,
6548 if (!tnapi->hw_status)
6551 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6552 sblk = tnapi->hw_status;
6554 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6557 /* If multivector TSS is enabled, vector 0 does not handle
6558 * tx interrupts. Don't allocate any resources for it.
6560 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6561 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6562 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6565 if (!tnapi->tx_buffers)
6568 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6570 &tnapi->tx_desc_mapping,
6572 if (!tnapi->tx_ring)
6577 * When RSS is enabled, the status block format changes
6578 * slightly. The "rx_jumbo_consumer", "reserved",
6579 * and "rx_mini_consumer" members get mapped to the
6580 * other three rx return ring producer indexes.
6584 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6587 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6590 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6593 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6598 * If multivector RSS is enabled, vector 0 does not handle
6599 * rx or tx interrupts. Don't allocate any resources for it.
6601 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6604 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6605 TG3_RX_RCB_RING_BYTES(tp),
6606 &tnapi->rx_rcb_mapping,
6611 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6617 tg3_free_consistent(tp);
6621 #define MAX_WAIT_CNT 1000
6623 /* To stop a block, clear the enable bit and poll till it
6624 * clears. tp->lock is held.
6626 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6631 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6638 /* We can't enable/disable these bits of the
6639 * 5705/5750, just say success.
6652 for (i = 0; i < MAX_WAIT_CNT; i++) {
6655 if ((val & enable_bit) == 0)
6659 if (i == MAX_WAIT_CNT && !silent) {
6660 dev_err(&tp->pdev->dev,
6661 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6669 /* tp->lock is held. */
6670 static int tg3_abort_hw(struct tg3 *tp, int silent)
6674 tg3_disable_ints(tp);
6676 tp->rx_mode &= ~RX_MODE_ENABLE;
6677 tw32_f(MAC_RX_MODE, tp->rx_mode);
6680 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6681 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6682 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6683 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6684 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6685 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6687 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6688 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6689 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6690 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6691 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6692 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6693 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6695 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6696 tw32_f(MAC_MODE, tp->mac_mode);
6699 tp->tx_mode &= ~TX_MODE_ENABLE;
6700 tw32_f(MAC_TX_MODE, tp->tx_mode);
6702 for (i = 0; i < MAX_WAIT_CNT; i++) {
6704 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6707 if (i >= MAX_WAIT_CNT) {
6708 dev_err(&tp->pdev->dev,
6709 "%s timed out, TX_MODE_ENABLE will not clear "
6710 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6714 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6715 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6716 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6718 tw32(FTQ_RESET, 0xffffffff);
6719 tw32(FTQ_RESET, 0x00000000);
6721 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6722 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6724 for (i = 0; i < tp->irq_cnt; i++) {
6725 struct tg3_napi *tnapi = &tp->napi[i];
6726 if (tnapi->hw_status)
6727 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6730 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6735 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6740 /* NCSI does not support APE events */
6741 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6744 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6745 if (apedata != APE_SEG_SIG_MAGIC)
6748 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6749 if (!(apedata & APE_FW_STATUS_READY))
6752 /* Wait for up to 1 millisecond for APE to service previous event. */
6753 for (i = 0; i < 10; i++) {
6754 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6757 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6759 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6760 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6761 event | APE_EVENT_STATUS_EVENT_PENDING);
6763 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6765 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6771 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6772 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6775 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6780 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6784 case RESET_KIND_INIT:
6785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6786 APE_HOST_SEG_SIG_MAGIC);
6787 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6788 APE_HOST_SEG_LEN_MAGIC);
6789 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6790 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6791 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6792 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6793 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6794 APE_HOST_BEHAV_NO_PHYLOCK);
6795 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6796 TG3_APE_HOST_DRVR_STATE_START);
6798 event = APE_EVENT_STATUS_STATE_START;
6800 case RESET_KIND_SHUTDOWN:
6801 /* With the interface we are currently using,
6802 * APE does not track driver state. Wiping
6803 * out the HOST SEGMENT SIGNATURE forces
6804 * the APE to assume OS absent status.
6806 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6808 if (device_may_wakeup(&tp->pdev->dev) &&
6809 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6810 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6811 TG3_APE_HOST_WOL_SPEED_AUTO);
6812 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6814 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6816 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6818 event = APE_EVENT_STATUS_STATE_UNLOAD;
6820 case RESET_KIND_SUSPEND:
6821 event = APE_EVENT_STATUS_STATE_SUSPEND;
6827 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6829 tg3_ape_send_event(tp, event);
6832 /* tp->lock is held. */
6833 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6835 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6836 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6838 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6840 case RESET_KIND_INIT:
6841 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6845 case RESET_KIND_SHUTDOWN:
6846 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6850 case RESET_KIND_SUSPEND:
6851 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6860 if (kind == RESET_KIND_INIT ||
6861 kind == RESET_KIND_SUSPEND)
6862 tg3_ape_driver_state_change(tp, kind);
6865 /* tp->lock is held. */
6866 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6868 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6870 case RESET_KIND_INIT:
6871 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6872 DRV_STATE_START_DONE);
6875 case RESET_KIND_SHUTDOWN:
6876 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6877 DRV_STATE_UNLOAD_DONE);
6885 if (kind == RESET_KIND_SHUTDOWN)
6886 tg3_ape_driver_state_change(tp, kind);
6889 /* tp->lock is held. */
6890 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6892 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6894 case RESET_KIND_INIT:
6895 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6899 case RESET_KIND_SHUTDOWN:
6900 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6904 case RESET_KIND_SUSPEND:
6905 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6915 static int tg3_poll_fw(struct tg3 *tp)
6920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6921 /* Wait up to 20ms for init done. */
6922 for (i = 0; i < 200; i++) {
6923 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6930 /* Wait for firmware initialization to complete. */
6931 for (i = 0; i < 100000; i++) {
6932 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6933 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6938 /* Chip might not be fitted with firmware. Some Sun onboard
6939 * parts are configured like that. So don't signal the timeout
6940 * of the above loop as an error, but do report the lack of
6941 * running firmware once.
6944 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6945 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6947 netdev_info(tp->dev, "No firmware running\n");
6950 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6951 /* The 57765 A0 needs a little more
6952 * time to do some important work.
6960 /* Save PCI command register before chip reset */
6961 static void tg3_save_pci_state(struct tg3 *tp)
6963 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6966 /* Restore PCI state after chip reset */
6967 static void tg3_restore_pci_state(struct tg3 *tp)
6971 /* Re-enable indirect register accesses. */
6972 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6973 tp->misc_host_ctrl);
6975 /* Set MAX PCI retry to zero. */
6976 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6977 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6978 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6979 val |= PCISTATE_RETRY_SAME_DMA;
6980 /* Allow reads and writes to the APE register and memory space. */
6981 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6982 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6983 PCISTATE_ALLOW_APE_SHMEM_WR |
6984 PCISTATE_ALLOW_APE_PSPACE_WR;
6985 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6987 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6989 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6990 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6991 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6993 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6994 tp->pci_cacheline_sz);
6995 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6996 tp->pci_lat_timer);
7000 /* Make sure PCI-X relaxed ordering bit is clear. */
7001 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7002 u16 pcix_cmd;
7004 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7005 &pcix_cmd);
7006 pcix_cmd &= ~PCI_X_CMD_ERO;
7007 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7008 pcix_cmd);
7011 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7013 /* Chip reset on 5780 will reset MSI enable bit,
7014 * so need to restore it.
7016 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7019 pci_read_config_word(tp->pdev,
7020 tp->msi_cap + PCI_MSI_FLAGS,
7021 &ctrl);
7022 pci_write_config_word(tp->pdev,
7023 tp->msi_cap + PCI_MSI_FLAGS,
7024 ctrl | PCI_MSI_FLAGS_ENABLE);
7025 val = tr32(MSGINT_MODE);
7026 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7031 static void tg3_stop_fw(struct tg3 *);
7033 /* tp->lock is held. */
7034 static int tg3_chip_reset(struct tg3 *tp)
7037 void (*write_op)(struct tg3 *, u32, u32);
7042 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7044 /* No matching tg3_nvram_unlock() after this because
7045 * chip reset below will undo the nvram lock.
7047 tp->nvram_lock_cnt = 0;
7049 /* GRC_MISC_CFG core clock reset will clear the memory
7050 * enable bit in PCI register 4 and the MSI enable bit
7051 * on some chips, so we save relevant registers here.
7053 tg3_save_pci_state(tp);
7055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7056 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7057 tw32(GRC_FASTBOOT_PC, 0);
7059 /*
7060 * We must avoid the readl() that normally takes place.
7061 * It locks machines, causes machine checks, and other
7062 * fun things. So, temporarily disable the 5701
7063 * hardware workaround while we do the reset.
7064 */
7065 write_op = tp->write32;
7066 if (write_op == tg3_write_flush_reg32)
7067 tp->write32 = tg3_write32;
7069 /* Prevent the irq handler from reading or writing PCI registers
7070 * during chip reset when the memory enable bit in the PCI command
7071 * register may be cleared. The chip does not generate interrupts
7072 * at this time, but the irq handler may still be called due to irq
7073 * sharing or irqpoll.
7074 */
7075 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7076 for (i = 0; i < tp->irq_cnt; i++) {
7077 struct tg3_napi *tnapi = &tp->napi[i];
7078 if (tnapi->hw_status) {
7079 tnapi->hw_status->status = 0;
7080 tnapi->hw_status->status_tag = 0;
7082 tnapi->last_tag = 0;
7083 tnapi->last_irq_tag = 0;
7087 for (i = 0; i < tp->irq_cnt; i++)
7088 synchronize_irq(tp->napi[i].irq_vec);
7090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7091 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7092 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7096 val = GRC_MISC_CFG_CORECLK_RESET;
7098 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7099 /* Force PCIe 1.0a mode */
7100 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7101 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
7102 tr32(TG3_PCIE_PHY_TSTCTL) ==
7103 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7104 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7106 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7107 tw32(GRC_MISC_CFG, (1 << 29));
7112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7113 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7114 tw32(GRC_VCPU_EXT_CTRL,
7115 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7118 /* Manage gphy power for all CPMU absent PCIe devices. */
7119 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7120 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7121 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7123 tw32(GRC_MISC_CFG, val);
7125 /* restore 5701 hardware bug workaround write method */
7126 tp->write32 = write_op;
7128 /* Unfortunately, we have to delay before the PCI read back.
7129 * Some 575X chips even will not respond to a PCI cfg access
7130 * when the reset command is given to the chip.
7132 * How do these hardware designers expect things to work
7133 * properly if the PCI write is posted for a long period
7134 * of time? It is always necessary to have some method by
7135 * which a register read back can occur to push the write
7136 * out which does the reset.
7138 * For most tg3 variants the trick below was working.
7139 */
7141 udelay(120);
7143 /* Flush PCI posted writes. The normal MMIO registers
7144 * are inaccessible at this time so this is the only
7145 * way to do this reliably (actually, this is no longer
7146 * the case, see above). I tried to use indirect
7147 * register read/write but this upset some 5701 variants.
7148 */
7149 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
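/* Why a config read works as the flush: per PCI ordering rules a
 * (non-posted) read cannot complete until previously posted writes
 * from the host have been pushed to the device, so this read forces
 * the posted GRC_MISC_CFG core-clock reset write out to the chip.
 */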
7153 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7156 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7160 /* Wait for link training to complete. */
7161 for (i = 0; i < 5000; i++)
7164 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7165 pci_write_config_dword(tp->pdev, 0xc4,
7166 cfg_val | (1 << 15));
7169 /* Clear the "no snoop" and "relaxed ordering" bits. */
7170 pci_read_config_word(tp->pdev,
7171 tp->pcie_cap + PCI_EXP_DEVCTL,
7172 &val16);
7173 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7174 PCI_EXP_DEVCTL_NOSNOOP_EN);
7175 /*
7176 * Older PCIe devices only support the 128 byte
7177 * MPS setting. Enforce the restriction.
7178 */
7179 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7180 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7181 pci_write_config_word(tp->pdev,
7182 tp->pcie_cap + PCI_EXP_DEVCTL,
7183 val16);
7185 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7187 /* Clear error status */
7188 pci_write_config_word(tp->pdev,
7189 tp->pcie_cap + PCI_EXP_DEVSTA,
7190 PCI_EXP_DEVSTA_CED |
7191 PCI_EXP_DEVSTA_NFED |
7192 PCI_EXP_DEVSTA_FED |
7193 PCI_EXP_DEVSTA_URD);
7196 tg3_restore_pci_state(tp);
7198 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7201 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7202 val = tr32(MEMARB_MODE);
7203 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7205 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7207 tw32(0x5000, 0x400);
7210 tw32(GRC_MODE, tp->grc_mode);
7212 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7215 tw32(0xc4, val | (1 << 15));
7218 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7220 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7221 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7222 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7223 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7226 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7227 tp->mac_mode = MAC_MODE_APE_TX_EN |
7228 MAC_MODE_APE_RX_EN |
7229 MAC_MODE_TDE_ENABLE;
7231 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7232 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7234 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7235 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7240 tw32_f(MAC_MODE, val);
7243 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7245 err = tg3_poll_fw(tp);
7251 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7252 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7253 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7254 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
7257 tw32(0x7c00, val | (1 << 25));
7260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7261 val = tr32(TG3_CPMU_CLCK_ORIDE);
7262 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7265 /* Reprobe ASF enable state. */
7266 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7267 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7268 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7269 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7272 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7273 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7274 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7275 tp->last_event_jiffies = jiffies;
7276 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7277 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7284 /* tp->lock is held. */
7285 static void tg3_stop_fw(struct tg3 *tp)
7287 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7288 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7289 /* Wait for RX cpu to ACK the previous event. */
7290 tg3_wait_for_event_ack(tp);
7292 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7294 tg3_generate_fw_event(tp);
7296 /* Wait for RX cpu to ACK this event. */
7297 tg3_wait_for_event_ack(tp);
7301 /* tp->lock is held. */
7302 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7308 tg3_write_sig_pre_reset(tp, kind);
7310 tg3_abort_hw(tp, silent);
7311 err = tg3_chip_reset(tp);
7313 __tg3_set_mac_addr(tp, 0);
7315 tg3_write_sig_legacy(tp, kind);
7316 tg3_write_sig_post_reset(tp, kind);
7324 #define RX_CPU_SCRATCH_BASE 0x30000
7325 #define RX_CPU_SCRATCH_SIZE 0x04000
7326 #define TX_CPU_SCRATCH_BASE 0x34000
7327 #define TX_CPU_SCRATCH_SIZE 0x04000
7329 /* tp->lock is held. */
7330 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7334 BUG_ON(offset == TX_CPU_BASE &&
7335 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7338 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7340 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7343 if (offset == RX_CPU_BASE) {
7344 for (i = 0; i < 10000; i++) {
7345 tw32(offset + CPU_STATE, 0xffffffff);
7346 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7347 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7351 tw32(offset + CPU_STATE, 0xffffffff);
7352 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7355 for (i = 0; i < 10000; i++) {
7356 tw32(offset + CPU_STATE, 0xffffffff);
7357 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7358 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7364 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7365 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7369 /* Clear firmware's nvram arbitration. */
7370 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7371 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7376 unsigned int fw_base;
7377 unsigned int fw_len;
7378 const __be32 *fw_data;
7381 /* tp->lock is held. */
7382 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7383 int cpu_scratch_size, struct fw_info *info)
7385 int err, lock_err, i;
7386 void (*write_op)(struct tg3 *, u32, u32);
7388 if (cpu_base == TX_CPU_BASE &&
7389 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7391 "%s: Trying to load TX cpu firmware which is 5705\n",
7396 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7397 write_op = tg3_write_mem;
7399 write_op = tg3_write_indirect_reg32;
7401 /* It is possible that bootcode is still loading at this point.
7402 * Get the nvram lock first before halting the cpu.
7404 lock_err = tg3_nvram_lock(tp);
7405 err = tg3_halt_cpu(tp, cpu_base);
7407 tg3_nvram_unlock(tp);
7411 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7412 write_op(tp, cpu_scratch_base + i, 0);
7413 tw32(cpu_base + CPU_STATE, 0xffffffff);
7414 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7415 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7416 write_op(tp, (cpu_scratch_base +
7417 (info->fw_base & 0xffff) +
7418 (i * sizeof(u32))),
7419 be32_to_cpu(info->fw_data[i]));
7427 /* tp->lock is held. */
7428 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7430 struct fw_info info;
7431 const __be32 *fw_data;
7434 fw_data = (void *)tp->fw->data;
7436 /* Firmware blob starts with version numbers, followed by
7437 start address and length. We are setting complete length.
7438 length = end_address_of_bss - start_address_of_text.
7439 Remainder is the blob to be loaded contiguously
7440 from start address. */
7442 info.fw_base = be32_to_cpu(fw_data[1]);
7443 info.fw_len = tp->fw->size - 12;
7444 info.fw_data = &fw_data[3];
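/* Illustrative layout of the firmware blob as it is consumed here
 * (not a definitive spec, just how the fields above are used):
 *
 *   fw_data[0]    version word
 *   fw_data[1]    load (start) address        -> info.fw_base
 *   fw_data[2]    full length including BSS   (checked in tg3_request_firmware)
 *   fw_data[3]..  text/data image, tp->fw->size - 12 bytes -> info.fw_data
 */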
7446 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7447 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7452 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7453 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7458 /* Now startup only the RX cpu. */
7459 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7460 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7462 for (i = 0; i < 5; i++) {
7463 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7465 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7466 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7467 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7471 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7472 "should be %08x\n", __func__,
7473 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7476 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7477 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
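/* Start-up sequence used above (and again in tg3_load_tso_firmware):
 * clear CPU_STATE, write the firmware entry point to CPU_PC, poll it
 * back a few times (re-halting and re-writing if it did not stick),
 * then write 0 to CPU_MODE to drop the HALT bit and let the on-chip
 * CPU run from fw_base.
 */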
7482 /* 5705 needs a special version of the TSO firmware. */
7484 /* tp->lock is held. */
7485 static int tg3_load_tso_firmware(struct tg3 *tp)
7487 struct fw_info info;
7488 const __be32 *fw_data;
7489 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7492 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7495 fw_data = (void *)tp->fw->data;
7497 /* Firmware blob starts with version numbers, followed by
7498 start address and length. We are setting complete length.
7499 length = end_address_of_bss - start_address_of_text.
7500 Remainder is the blob to be loaded contiguously
7501 from start address. */
7503 info.fw_base = be32_to_cpu(fw_data[1]);
7504 cpu_scratch_size = tp->fw_len;
7505 info.fw_len = tp->fw->size - 12;
7506 info.fw_data = &fw_data[3];
7508 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7509 cpu_base = RX_CPU_BASE;
7510 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7512 cpu_base = TX_CPU_BASE;
7513 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7514 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7517 err = tg3_load_firmware_cpu(tp, cpu_base,
7518 cpu_scratch_base, cpu_scratch_size,
7523 /* Now startup the cpu. */
7524 tw32(cpu_base + CPU_STATE, 0xffffffff);
7525 tw32_f(cpu_base + CPU_PC, info.fw_base);
7527 for (i = 0; i < 5; i++) {
7528 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7530 tw32(cpu_base + CPU_STATE, 0xffffffff);
7531 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7532 tw32_f(cpu_base + CPU_PC, info.fw_base);
7537 "%s fails to set CPU PC, is %08x should be %08x\n",
7538 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7541 tw32(cpu_base + CPU_STATE, 0xffffffff);
7542 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7547 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7549 struct tg3 *tp = netdev_priv(dev);
7550 struct sockaddr *addr = p;
7551 int err = 0, skip_mac_1 = 0;
7553 if (!is_valid_ether_addr(addr->sa_data))
7556 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7558 if (!netif_running(dev))
7561 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7562 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7564 addr0_high = tr32(MAC_ADDR_0_HIGH);
7565 addr0_low = tr32(MAC_ADDR_0_LOW);
7566 addr1_high = tr32(MAC_ADDR_1_HIGH);
7567 addr1_low = tr32(MAC_ADDR_1_LOW);
7569 /* Skip MAC addr 1 if ASF is using it. */
7570 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7571 !(addr1_high == 0 && addr1_low == 0))
7574 spin_lock_bh(&tp->lock);
7575 __tg3_set_mac_addr(tp, skip_mac_1);
7576 spin_unlock_bh(&tp->lock);
7581 /* tp->lock is held. */
7582 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7583 dma_addr_t mapping, u32 maxlen_flags,
7584 u32 nic_addr)
7585 {
7586 tg3_write_mem(tp,
7587 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7588 ((u64) mapping >> 32));
7589 tg3_write_mem(tp,
7590 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7591 ((u64) mapping & 0xffffffff));
7592 tg3_write_mem(tp,
7593 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7594 maxlen_flags);
7596 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7597 tg3_write_mem(tp,
7598 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7599 nic_addr);
7600 }
7602 static void __tg3_set_rx_mode(struct net_device *);
7603 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7607 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7608 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7609 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7610 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7612 tw32(HOSTCC_TXCOL_TICKS, 0);
7613 tw32(HOSTCC_TXMAX_FRAMES, 0);
7614 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7617 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7618 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7619 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7620 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7622 tw32(HOSTCC_RXCOL_TICKS, 0);
7623 tw32(HOSTCC_RXMAX_FRAMES, 0);
7624 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7627 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7628 u32 val = ec->stats_block_coalesce_usecs;
7630 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7631 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7633 if (!netif_carrier_ok(tp->dev))
7636 tw32(HOSTCC_STAT_COAL_TICKS, val);
7639 for (i = 0; i < tp->irq_cnt - 1; i++) {
7642 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7643 tw32(reg, ec->rx_coalesce_usecs);
7644 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7645 tw32(reg, ec->rx_max_coalesced_frames);
7646 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7647 tw32(reg, ec->rx_max_coalesced_frames_irq);
7649 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7650 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7651 tw32(reg, ec->tx_coalesce_usecs);
7652 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7653 tw32(reg, ec->tx_max_coalesced_frames);
7654 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7655 tw32(reg, ec->tx_max_coalesced_frames_irq);
7659 for (; i < tp->irq_max - 1; i++) {
7660 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7661 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7662 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7664 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7665 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7666 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7667 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
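/* Each additional MSI-X vector owns its own copy of the host
 * coalescing registers: the per-vector blocks start at the *_VEC1
 * addresses and repeat every 0x18 bytes, which is why the loops above
 * index the registers by i * 0x18.
 */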
7672 /* tp->lock is held. */
7673 static void tg3_rings_reset(struct tg3 *tp)
7676 u32 stblk, txrcb, rxrcb, limit;
7677 struct tg3_napi *tnapi = &tp->napi[0];
7679 /* Disable all transmit rings but the first. */
7680 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7681 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7682 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7683 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7684 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7685 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7687 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7689 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7690 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7691 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7692 BDINFO_FLAGS_DISABLED);
7695 /* Disable all receive return rings but the first. */
7696 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7697 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7698 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7699 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7700 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7701 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7702 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7704 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7706 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7707 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7708 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7709 BDINFO_FLAGS_DISABLED);
7711 /* Disable interrupts */
7712 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7714 /* Zero mailbox registers. */
7715 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7716 for (i = 1; i < tp->irq_max; i++) {
7717 tp->napi[i].tx_prod = 0;
7718 tp->napi[i].tx_cons = 0;
7719 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7720 tw32_mailbox(tp->napi[i].prodmbox, 0);
7721 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7722 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7724 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7725 tw32_mailbox(tp->napi[0].prodmbox, 0);
7727 tp->napi[0].tx_prod = 0;
7728 tp->napi[0].tx_cons = 0;
7729 tw32_mailbox(tp->napi[0].prodmbox, 0);
7730 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7733 /* Make sure the NIC-based send BD rings are disabled. */
7734 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7735 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7736 for (i = 0; i < 16; i++)
7737 tw32_tx_mbox(mbox + i * 8, 0);
7740 txrcb = NIC_SRAM_SEND_RCB;
7741 rxrcb = NIC_SRAM_RCV_RET_RCB;
7743 /* Clear status block in ram. */
7744 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7746 /* Set status block DMA address */
7747 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7748 ((u64) tnapi->status_mapping >> 32));
7749 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7750 ((u64) tnapi->status_mapping & 0xffffffff));
7752 if (tnapi->tx_ring) {
7753 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7754 (TG3_TX_RING_SIZE <<
7755 BDINFO_FLAGS_MAXLEN_SHIFT),
7756 NIC_SRAM_TX_BUFFER_DESC);
7757 txrcb += TG3_BDINFO_SIZE;
7760 if (tnapi->rx_rcb) {
7761 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7762 (tp->rx_ret_ring_mask + 1) <<
7763 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7764 rxrcb += TG3_BDINFO_SIZE;
7767 stblk = HOSTCC_STATBLCK_RING1;
7769 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7770 u64 mapping = (u64)tnapi->status_mapping;
7771 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7772 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7774 /* Clear status block in ram. */
7775 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7777 if (tnapi->tx_ring) {
7778 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7779 (TG3_TX_RING_SIZE <<
7780 BDINFO_FLAGS_MAXLEN_SHIFT),
7781 NIC_SRAM_TX_BUFFER_DESC);
7782 txrcb += TG3_BDINFO_SIZE;
7785 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7786 ((tp->rx_ret_ring_mask + 1) <<
7787 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7790 rxrcb += TG3_BDINFO_SIZE;
7794 /* tp->lock is held. */
7795 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7797 u32 val, rdmac_mode;
7799 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7801 tg3_disable_ints(tp);
7805 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7807 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7808 tg3_abort_hw(tp, 1);
7810 /* Enable MAC control of LPI */
7811 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7812 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7813 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7814 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7816 tw32_f(TG3_CPMU_EEE_CTRL,
7817 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7819 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7820 TG3_CPMU_EEEMD_LPI_IN_TX |
7821 TG3_CPMU_EEEMD_LPI_IN_RX |
7822 TG3_CPMU_EEEMD_EEE_ENABLE;
7824 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7825 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7827 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7828 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7830 tw32_f(TG3_CPMU_EEE_MODE, val);
7832 tw32_f(TG3_CPMU_EEE_DBTMR1,
7833 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7834 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7836 tw32_f(TG3_CPMU_EEE_DBTMR2,
7837 TG3_CPMU_DBTMR2_APE_TX_2047US |
7838 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7844 err = tg3_chip_reset(tp);
7848 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7850 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7851 val = tr32(TG3_CPMU_CTRL);
7852 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7853 tw32(TG3_CPMU_CTRL, val);
7855 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7856 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7857 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7858 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7860 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7861 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7862 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7863 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7865 val = tr32(TG3_CPMU_HST_ACC);
7866 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7867 val |= CPMU_HST_ACC_MACCLK_6_25;
7868 tw32(TG3_CPMU_HST_ACC, val);
7871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7872 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7873 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7874 PCIE_PWR_MGMT_L1_THRESH_4MS;
7875 tw32(PCIE_PWR_MGMT_THRESH, val);
7877 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7878 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7880 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7882 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7883 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7886 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7887 u32 grc_mode = tr32(GRC_MODE);
7889 /* Access the lower 1K of PL PCIE block registers. */
7890 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7891 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7893 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7894 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7895 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7897 tw32(GRC_MODE, grc_mode);
7900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7901 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7902 u32 grc_mode = tr32(GRC_MODE);
7904 /* Access the lower 1K of PL PCIE block registers. */
7905 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7906 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7908 val = tr32(TG3_PCIE_TLDLPL_PORT +
7909 TG3_PCIE_PL_LO_PHYCTL5);
7910 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7911 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7913 tw32(GRC_MODE, grc_mode);
7916 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7917 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7918 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7919 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7922 /* This works around an issue with Athlon chipsets on
7923 * B3 tigon3 silicon. This bit has no effect on any
7924 * other revision. But do not set this on PCI Express
7925 * chips and don't even touch the clocks if the CPMU is present.
7926 */
7927 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7928 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7929 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7930 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7933 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7934 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7935 val = tr32(TG3PCI_PCISTATE);
7936 val |= PCISTATE_RETRY_SAME_DMA;
7937 tw32(TG3PCI_PCISTATE, val);
7940 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7941 /* Allow reads and writes to the
7942 * APE register and memory space.
7943 */
7944 val = tr32(TG3PCI_PCISTATE);
7945 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7946 PCISTATE_ALLOW_APE_SHMEM_WR |
7947 PCISTATE_ALLOW_APE_PSPACE_WR;
7948 tw32(TG3PCI_PCISTATE, val);
7951 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7952 /* Enable some hw fixes. */
7953 val = tr32(TG3PCI_MSI_DATA);
7954 val |= (1 << 26) | (1 << 28) | (1 << 29);
7955 tw32(TG3PCI_MSI_DATA, val);
7958 /* Descriptor ring init may make accesses to the
7959 * NIC SRAM area to setup the TX descriptors, so we
7960 * can only do this after the hardware has been
7961 * successfully reset.
7962 */
7963 err = tg3_init_rings(tp);
7967 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
7968 val = tr32(TG3PCI_DMA_RW_CTRL) &
7969 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7970 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7971 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7972 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7974 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7975 /* This value is determined during the probe time DMA
7976 * engine test, tg3_test_dma.
7977 */
7978 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7981 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7982 GRC_MODE_4X_NIC_SEND_RINGS |
7983 GRC_MODE_NO_TX_PHDR_CSUM |
7984 GRC_MODE_NO_RX_PHDR_CSUM);
7985 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7987 /* Pseudo-header checksum is done by hardware logic and not
7988 * the offload processors, so make the chip do the pseudo-
7989 * header checksums on receive. For transmit it is more
7990 * convenient to do the pseudo-header checksum in software
7991 * as Linux does that on transmit for us in all cases.
7992 */
7993 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7997 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7999 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8000 val = tr32(GRC_MISC_CFG);
8002 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8003 tw32(GRC_MISC_CFG, val);
8005 /* Initialize MBUF/DESC pool. */
8006 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8008 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8009 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8011 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8013 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8014 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8015 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8016 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8019 fw_len = tp->fw_len;
8020 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8021 tw32(BUFMGR_MB_POOL_ADDR,
8022 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8023 tw32(BUFMGR_MB_POOL_SIZE,
8024 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
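/* The rounding above is the usual align-up idiom: (len + 0x7f) & ~0x7f
 * rounds fw_len up to the next multiple of 128 bytes. For example, a
 * 0x1234-byte image becomes 0x1280, and the mbuf pool base and size
 * are then offset by that amount so the firmware area is not reused.
 */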
8027 if (tp->dev->mtu <= ETH_DATA_LEN) {
8028 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8029 tp->bufmgr_config.mbuf_read_dma_low_water);
8030 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8031 tp->bufmgr_config.mbuf_mac_rx_low_water);
8032 tw32(BUFMGR_MB_HIGH_WATER,
8033 tp->bufmgr_config.mbuf_high_water);
8035 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8036 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8037 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8038 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8039 tw32(BUFMGR_MB_HIGH_WATER,
8040 tp->bufmgr_config.mbuf_high_water_jumbo);
8042 tw32(BUFMGR_DMA_LOW_WATER,
8043 tp->bufmgr_config.dma_low_water);
8044 tw32(BUFMGR_DMA_HIGH_WATER,
8045 tp->bufmgr_config.dma_high_water);
8047 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8049 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8050 tw32(BUFMGR_MODE, val);
8051 for (i = 0; i < 2000; i++) {
8052 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8057 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8061 /* Setup replenish threshold. */
8062 val = tp->rx_pending / 8;
8065 else if (val > tp->rx_std_max_post)
8066 val = tp->rx_std_max_post;
8067 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8068 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8069 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8071 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
8072 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
8075 tw32(RCVBDI_STD_THRESH, val);
8077 /* Initialize TG3_BDINFO's at:
8078 * RCVDBDI_STD_BD: standard eth size rx ring
8079 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8080 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8083 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8084 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8085 * ring attribute flags
8086 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8088 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8089 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8091 * The size of each ring is fixed in the firmware, but the location is
8092 * configurable.
8093 */
8094 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8095 ((u64) tpr->rx_std_mapping >> 32));
8096 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8097 ((u64) tpr->rx_std_mapping & 0xffffffff));
8098 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
8099 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8100 NIC_SRAM_RX_BUFFER_DESC);
8102 /* Disable the mini ring */
8103 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8104 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8105 BDINFO_FLAGS_DISABLED);
8107 /* Program the jumbo buffer descriptor ring control
8108 * blocks on those devices that have them.
8109 */
8110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8111 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8112 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8113 /* Setup replenish threshold. */
8114 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8116 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8117 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8118 ((u64) tpr->rx_jmb_mapping >> 32));
8119 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8120 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8121 val = TG3_RX_JMB_RING_SIZE(tp) <<
8122 BDINFO_FLAGS_MAXLEN_SHIFT;
8123 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8124 val | BDINFO_FLAGS_USE_EXT_RECV);
8125 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8127 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8128 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8130 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8131 BDINFO_FLAGS_DISABLED);
8134 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8136 val = TG3_RX_STD_MAX_SIZE_5700;
8138 val = TG3_RX_STD_MAX_SIZE_5717;
8139 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8140 val |= (TG3_RX_STD_DMA_SZ << 2);
8142 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8144 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8146 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8148 tpr->rx_std_prod_idx = tp->rx_pending;
8149 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8151 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8152 tp->rx_jumbo_pending : 0;
8153 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8155 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8156 tw32(STD_REPLENISH_LWM, 32);
8157 tw32(JMB_REPLENISH_LWM, 16);
8160 tg3_rings_reset(tp);
8162 /* Initialize MAC address and backoff seed. */
8163 __tg3_set_mac_addr(tp, 0);
8165 /* MTU + ethernet header + FCS + optional VLAN tag */
8166 tw32(MAC_RX_MTU_SIZE,
8167 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
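/* For the default 1500-byte MTU this programs 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes as the largest frame
 * the MAC will accept.
 */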
8169 /* The slot time is changed by tg3_setup_phy if we
8170 * run at gigabit with half duplex.
8171 */
8172 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8173 (6 << TX_LENGTHS_IPG_SHIFT) |
8174 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8177 val |= tr32(MAC_TX_LENGTHS) &
8178 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8179 TX_LENGTHS_CNT_DWN_VAL_MSK);
8181 tw32(MAC_TX_LENGTHS, val);
8183 /* Receive rules. */
8184 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8185 tw32(RCVLPC_CONFIG, 0x0181);
8187 /* Calculate RDMAC_MODE setting early; we need it to determine
8188 * the RCVLPC_STATE_ENABLE mask.
8189 */
8190 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8191 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8192 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8193 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8194 RDMAC_MODE_LNGREAD_ENAB);
8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8197 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8199 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8202 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8203 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8204 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8207 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8208 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8210 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8211 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8212 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8213 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8217 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8218 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8220 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8221 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8223 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8226 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8229 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8235 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
8236 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8238 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8239 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8240 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8241 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8242 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8243 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8244 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8246 tw32(TG3_RDMA_RSRVCTRL_REG,
8247 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8252 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8253 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8254 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8255 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8258 /* Receive/send statistics. */
8259 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8260 val = tr32(RCVLPC_STATS_ENABLE);
8261 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8262 tw32(RCVLPC_STATS_ENABLE, val);
8263 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8264 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8265 val = tr32(RCVLPC_STATS_ENABLE);
8266 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8267 tw32(RCVLPC_STATS_ENABLE, val);
8269 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8271 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8272 tw32(SNDDATAI_STATSENAB, 0xffffff);
8273 tw32(SNDDATAI_STATSCTRL,
8274 (SNDDATAI_SCTRL_ENABLE |
8275 SNDDATAI_SCTRL_FASTUPD));
8277 /* Setup host coalescing engine. */
8278 tw32(HOSTCC_MODE, 0);
8279 for (i = 0; i < 2000; i++) {
8280 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8285 __tg3_set_coalesce(tp, &tp->coal);
8287 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8288 /* Status/statistics block address. See tg3_timer,
8289 * the tg3_periodic_fetch_stats call there, and
8290 * tg3_get_stats to see how this works for 5705/5750 chips.
8291 */
8292 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8293 ((u64) tp->stats_mapping >> 32));
8294 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8295 ((u64) tp->stats_mapping & 0xffffffff));
8296 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8298 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8300 /* Clear statistics and status block memory areas */
8301 for (i = NIC_SRAM_STATS_BLK;
8302 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8303 i += sizeof(u32))
8304 tg3_write_mem(tp, i, 0);
8309 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8311 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8312 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8313 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8314 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8316 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8317 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8318 /* reset to prevent losing 1st rx packet intermittently */
8319 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8323 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8324 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8327 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8328 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8329 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8330 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8331 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8332 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8333 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8336 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8337 * If TG3_FLG2_IS_NIC is zero, we should read the
8338 * register to preserve the GPIO settings for LOMs. The GPIOs,
8339 * whether used as inputs or outputs, are set by boot code after
8340 * reset.
8341 */
8342 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8345 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8346 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8347 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8350 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8351 GRC_LCLCTRL_GPIO_OUTPUT3;
8353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8354 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8356 tp->grc_local_ctrl &= ~gpio_mask;
8357 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8359 /* GPIO1 must be driven high for eeprom write protect */
8360 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8361 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8362 GRC_LCLCTRL_GPIO_OUTPUT1);
8364 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8367 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8369 val = tr32(MSGINT_MODE);
8370 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8371 tw32(MSGINT_MODE, val);
8374 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8375 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8379 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8380 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8381 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8382 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8383 WDMAC_MODE_LNGREAD_ENAB);
8385 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8386 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8387 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8388 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8389 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8391 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8392 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8393 val |= WDMAC_MODE_RX_ACCEL;
8397 /* Enable host coalescing bug fix */
8398 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8399 val |= WDMAC_MODE_STATUS_TAG_FIX;
8401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8402 val |= WDMAC_MODE_BURST_ALL_DATA;
8404 tw32_f(WDMAC_MODE, val);
8407 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8408 u16 pcix_cmd;
8410 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8411 &pcix_cmd);
8412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8413 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8414 pcix_cmd |= PCI_X_CMD_READ_2K;
8415 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8416 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8417 pcix_cmd |= PCI_X_CMD_READ_2K;
8419 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8420 pcix_cmd);
8421 }
8423 tw32_f(RDMAC_MODE, rdmac_mode);
8426 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8427 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8428 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8432 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8434 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8436 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8437 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8438 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8439 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
8440 val |= RCVDBDI_MODE_LRG_RING_SZ;
8441 tw32(RCVDBDI_MODE, val);
8442 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8443 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8444 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8445 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8446 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8447 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8448 tw32(SNDBDI_MODE, val);
8449 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8451 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8452 err = tg3_load_5701_a0_firmware_fix(tp);
8457 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8458 err = tg3_load_tso_firmware(tp);
8463 tp->tx_mode = TX_MODE_ENABLE;
8465 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8467 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8471 tp->tx_mode &= ~val;
8472 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8475 tw32_f(MAC_TX_MODE, tp->tx_mode);
8478 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8479 u32 reg = MAC_RSS_INDIR_TBL_0;
8480 u8 *ent = (u8 *)&val;
8482 /* Setup the indirection table */
8483 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8484 int idx = i % sizeof(val);
8486 ent[idx] = i % (tp->irq_cnt - 1);
7487 if (idx == sizeof(val) - 1) {
7488 tw32(reg, val);
7489 reg += 4;
7490 }
7491 }
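/* The indirection table holds TG3_RSS_INDIR_TBL_SIZE one-byte entries
 * packed four per 32-bit register, so the loop above fills 32
 * registers starting at MAC_RSS_INDIR_TBL_0. Each entry names one of
 * the tp->irq_cnt - 1 rx return rings, assigned round-robin, which
 * spreads the RSS hash buckets evenly across the rx vectors.
 */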
8493 /* Setup the "secret" hash key. */
8494 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8495 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8496 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8497 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8498 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8499 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8500 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8501 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8502 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8503 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8506 tp->rx_mode = RX_MODE_ENABLE;
8507 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8508 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8510 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8511 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8512 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8513 RX_MODE_RSS_IPV6_HASH_EN |
8514 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8515 RX_MODE_RSS_IPV4_HASH_EN |
8516 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8518 tw32_f(MAC_RX_MODE, tp->rx_mode);
8521 tw32(MAC_LED_CTRL, tp->led_ctrl);
8523 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8524 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8525 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8528 tw32_f(MAC_RX_MODE, tp->rx_mode);
8531 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8532 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8533 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8534 /* Set drive transmission level to 1.2V */
8535 /* only if the signal pre-emphasis bit is not set */
8536 val = tr32(MAC_SERDES_CFG);
8539 tw32(MAC_SERDES_CFG, val);
8541 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8542 tw32(MAC_SERDES_CFG, 0x616000);
8545 /* Prevent chip from dropping frames when flow control
8546 * is enabled.
8547 */
8548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8552 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8555 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8556 /* Use hardware link auto-negotiation */
8557 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8560 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8561 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8564 tmp = tr32(SERDES_RX_CTRL);
8565 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8566 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8567 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8568 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8571 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8572 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8573 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8574 tp->link_config.speed = tp->link_config.orig_speed;
8575 tp->link_config.duplex = tp->link_config.orig_duplex;
8576 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8579 err = tg3_setup_phy(tp, 0);
8583 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8584 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8587 /* Clear CRC stats. */
8588 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8589 tg3_writephy(tp, MII_TG3_TEST1,
8590 tmp | MII_TG3_TEST1_CRC_EN);
8591 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8596 __tg3_set_rx_mode(tp->dev);
8598 /* Initialize receive rules. */
8599 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8600 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8601 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8602 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8604 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8605 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8609 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8613 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8615 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8617 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8619 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8621 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8623 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8625 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8627 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8629 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8631 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8633 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8635 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8637 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8639 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8647 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8648 /* Write our heartbeat update interval to APE. */
8649 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8650 APE_HOST_HEARTBEAT_INT_DISABLE);
8652 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8657 /* Called at device open time to get the chip ready for
8658 * packet processing. Invoked with tp->lock held.
8660 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8662 tg3_switch_clocks(tp);
8664 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8666 return tg3_reset_hw(tp, reset_phy);
8669 #define TG3_STAT_ADD32(PSTAT, REG) \
8670 do { u32 __val = tr32(REG); \
8671 (PSTAT)->low += __val; \
8672 if ((PSTAT)->low < __val) \
8673 (PSTAT)->high += 1; \
8674 } while (0)
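/* Example of the wrap handling above: if (PSTAT)->low is 0xffffff00
 * and the register reads 0x200, low becomes 0x100 after the add;
 * since 0x100 < 0x200 the 32-bit hardware counter must have wrapped,
 * so high is incremented and the accumulated 64-bit total stays
 * correct.
 */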
8676 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8678 struct tg3_hw_stats *sp = tp->hw_stats;
8680 if (!netif_carrier_ok(tp->dev))
8683 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8684 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8685 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8686 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8687 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8688 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8689 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8690 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8691 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8692 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8693 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8694 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8695 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8697 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8698 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8699 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8700 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8701 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8702 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8703 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8704 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8705 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8706 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8707 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8708 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8709 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8710 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8712 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8713 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8714 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8717 static void tg3_timer(unsigned long __opaque)
8719 struct tg3 *tp = (struct tg3 *) __opaque;
8724 spin_lock(&tp->lock);
8726 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8727 /* All of this garbage is because when using non-tagged
8728 * IRQ status, the mailbox/status_block protocol the chip
8729 * uses with the cpu is race prone.
8730 */
8731 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8732 tw32(GRC_LOCAL_CTRL,
8733 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8735 tw32(HOSTCC_MODE, tp->coalesce_mode |
8736 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8739 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8740 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8741 spin_unlock(&tp->lock);
8742 schedule_work(&tp->reset_task);
8747 /* This part only runs once per second. */
8748 if (!--tp->timer_counter) {
8749 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8750 tg3_periodic_fetch_stats(tp);
8752 if (tp->setlpicnt && !--tp->setlpicnt) {
8753 u32 val = tr32(TG3_CPMU_EEE_MODE);
8754 tw32(TG3_CPMU_EEE_MODE,
8755 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8758 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8762 mac_stat = tr32(MAC_STATUS);
8765 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8766 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8768 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8772 tg3_setup_phy(tp, 0);
8773 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8774 u32 mac_stat = tr32(MAC_STATUS);
8777 if (netif_carrier_ok(tp->dev) &&
8778 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8781 if (!netif_carrier_ok(tp->dev) &&
8782 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8783 MAC_STATUS_SIGNAL_DET))) {
8787 if (!tp->serdes_counter) {
8790 ~MAC_MODE_PORT_MODE_MASK));
8792 tw32_f(MAC_MODE, tp->mac_mode);
8795 tg3_setup_phy(tp, 0);
8797 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8798 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8799 tg3_serdes_parallel_detect(tp);
8802 tp->timer_counter = tp->timer_multiplier;
8805 /* Heartbeat is only sent once every 2 seconds.
8807 * The heartbeat is to tell the ASF firmware that the host
8808 * driver is still alive. In the event that the OS crashes,
8809 * ASF needs to reset the hardware to free up the FIFO space
8810 * that may be filled with rx packets destined for the host.
8811 * If the FIFO is full, ASF will no longer function properly.
8813 * Unintended resets have been reported on real time kernels
8814 * where the timer doesn't run on time. Netpoll will also have
8815 * the same problem.
8816 *
8817 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8818 * to check the ring condition when the heartbeat is expiring
8819 * before doing the reset. This will prevent most unintended
8820 * resets.
8821 */
8822 if (!--tp->asf_counter) {
8823 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8824 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8825 tg3_wait_for_event_ack(tp);
8827 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8828 FWCMD_NICDRV_ALIVE3);
8829 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8830 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8831 TG3_FW_UPDATE_TIMEOUT_SEC);
8833 tg3_generate_fw_event(tp);
8835 tp->asf_counter = tp->asf_multiplier;
8838 spin_unlock(&tp->lock);
8841 tp->timer.expires = jiffies + tp->timer_offset;
8842 add_timer(&tp->timer);
8845 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8848 unsigned long flags;
8850 struct tg3_napi *tnapi = &tp->napi[irq_num];
8852 if (tp->irq_cnt == 1)
8853 name = tp->dev->name;
8855 name = &tnapi->irq_lbl[0];
8856 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8857 name[IFNAMSIZ-1] = 0;
8860 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8862 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8867 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8868 fn = tg3_interrupt_tagged;
8869 flags = IRQF_SHARED;
8872 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8875 static int tg3_test_interrupt(struct tg3 *tp)
8877 struct tg3_napi *tnapi = &tp->napi[0];
8878 struct net_device *dev = tp->dev;
8879 int err, i, intr_ok = 0;
8882 if (!netif_running(dev))
8885 tg3_disable_ints(tp);
8887 free_irq(tnapi->irq_vec, tnapi);
8889 /*
8890 * Turn off MSI one shot mode. Otherwise this test has no
8891 * observable way to know whether the interrupt was delivered.
8892 */
8893 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
8894 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8895 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8896 tw32(MSGINT_MODE, val);
8899 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8900 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8904 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8905 tg3_enable_ints(tp);
8907 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8910 for (i = 0; i < 5; i++) {
8911 u32 int_mbox, misc_host_ctrl;
8913 int_mbox = tr32_mailbox(tnapi->int_mbox);
8914 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8916 if ((int_mbox != 0) ||
8917 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8925 tg3_disable_ints(tp);
8927 free_irq(tnapi->irq_vec, tnapi);
8929 err = tg3_request_irq(tp, 0);
8935 /* Reenable MSI one shot mode. */
8936 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
8937 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8938 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8939 tw32(MSGINT_MODE, val);
8947 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8948 * successfully restored
8950 static int tg3_test_msi(struct tg3 *tp)
8955 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8958 /* Turn off SERR reporting in case MSI terminates with Master
8961 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8962 pci_write_config_word(tp->pdev, PCI_COMMAND,
8963 pci_cmd & ~PCI_COMMAND_SERR);
8965 err = tg3_test_interrupt(tp);
8967 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8972 /* other failures */
8976 /* MSI test failed, go back to INTx mode */
8977 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8978 "to INTx mode. Please report this failure to the PCI "
8979 "maintainer and include system chipset information\n");
8981 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8983 pci_disable_msi(tp->pdev);
8985 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8986 tp->napi[0].irq_vec = tp->pdev->irq;
8988 err = tg3_request_irq(tp, 0);
8992 /* Need to reset the chip because the MSI cycle may have terminated
8993 * with Master Abort.
8995 tg3_full_lock(tp, 1);
8997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8998 err = tg3_init_hw(tp, 1);
9000 tg3_full_unlock(tp);
9003 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9008 static int tg3_request_firmware(struct tg3 *tp)
9010 const __be32 *fw_data;
9012 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9013 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9018 fw_data = (void *)tp->fw->data;
9020 /* Firmware blob starts with version numbers, followed by
9021 * start address and _full_ length including BSS sections
9022 * (which must be longer than the actual data, of course
9025 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9026 if (tp->fw_len < (tp->fw->size - 12)) {
9027 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9028 tp->fw_len, tp->fw_needed);
9029 release_firmware(tp->fw);
9034 /* We no longer need firmware; we have it. */
9035 tp->fw_needed = NULL;
9039 static bool tg3_enable_msix(struct tg3 *tp)
9041 int i, rc, cpus = num_online_cpus();
9042 struct msix_entry msix_ent[tp->irq_max];
9045 /* Just fallback to the simpler MSI mode. */
9049 * We want as many rx rings enabled as there are cpus.
9050 * The first MSIX vector only deals with link interrupts, etc,
9051 * so we add one to the number of vectors we are requesting.
9053 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
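/* Worked example (editorial): on a 4-CPU system with, say, tp->irq_max == 5,
 * irq_cnt = min(4 + 1, 5) = 5 -- one vector for link/error events plus four
 * rx-ring vectors.  With 16 CPUs the request is still capped at irq_max, so
 * at most irq_max - 1 rx rings can be enabled.
 */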
9055 for (i = 0; i < tp->irq_max; i++) {
9056 msix_ent[i].entry = i;
9057 msix_ent[i].vector = 0;
9060 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9063 } else if (rc != 0) {
9064 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9066 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9071 for (i = 0; i < tp->irq_max; i++)
9072 tp->napi[i].irq_vec = msix_ent[i].vector;
9074 netif_set_real_num_tx_queues(tp->dev, 1);
9075 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9076 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9077 pci_disable_msix(tp->pdev);
9081 if (tp->irq_cnt > 1) {
9082 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9084 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9086 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9087 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9094 static void tg3_ints_init(struct tg3 *tp)
9096 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
9097 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
9098 /* All MSI supporting chips should support tagged
9099 * status. Assert that this is the case.
9101 netdev_warn(tp->dev,
9102 "MSI without TAGGED_STATUS? Not using MSI\n");
9106 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
9107 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
9108 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
9109 pci_enable_msi(tp->pdev) == 0)
9110 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
9112 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9113 u32 msi_mode = tr32(MSGINT_MODE);
9114 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9116 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9117 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9120 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
9122 tp->napi[0].irq_vec = tp->pdev->irq;
9123 netif_set_real_num_tx_queues(tp->dev, 1);
9124 netif_set_real_num_rx_queues(tp->dev, 1);
9128 static void tg3_ints_fini(struct tg3 *tp)
9130 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9131 pci_disable_msix(tp->pdev);
9132 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
9133 pci_disable_msi(tp->pdev);
9134 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
9135 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
9138 static int tg3_open(struct net_device *dev)
9140 struct tg3 *tp = netdev_priv(dev);
9143 if (tp->fw_needed) {
9144 err = tg3_request_firmware(tp);
9145 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9149 netdev_warn(tp->dev, "TSO capability disabled\n");
9150 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9151 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9152 netdev_notice(tp->dev, "TSO capability restored\n");
9153 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9157 netif_carrier_off(tp->dev);
9159 err = tg3_power_up(tp);
9163 tg3_full_lock(tp, 0);
9165 tg3_disable_ints(tp);
9166 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9168 tg3_full_unlock(tp);
9171 * Setup interrupts first so we know how
9172 * many NAPI resources to allocate
9176 /* The placement of this call is tied
9177 * to the setup and use of Host TX descriptors.
9179 err = tg3_alloc_consistent(tp);
9185 tg3_napi_enable(tp);
9187 for (i = 0; i < tp->irq_cnt; i++) {
9188 struct tg3_napi *tnapi = &tp->napi[i];
9189 err = tg3_request_irq(tp, i);
9191 for (i--; i >= 0; i--)
9192 free_irq(tnapi->irq_vec, tnapi);
9200 tg3_full_lock(tp, 0);
9202 err = tg3_init_hw(tp, 1);
9204 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9207 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9208 tp->timer_offset = HZ;
9210 tp->timer_offset = HZ / 10;
9212 BUG_ON(tp->timer_offset > HZ);
9213 tp->timer_counter = tp->timer_multiplier =
9214 (HZ / tp->timer_offset);
9215 tp->asf_counter = tp->asf_multiplier =
9216 ((HZ / tp->timer_offset) * 2);
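/* Illustrative arithmetic (editorial): with TAGGED_STATUS the timer fires
 * every HZ jiffies (once a second), so timer_multiplier = 1 and
 * asf_multiplier = 2; without it the timer fires every HZ/10 jiffies, giving
 * 10 and 20.  Either way the ASF heartbeat in tg3_timer() goes out every
 * asf_multiplier ticks, i.e. once every 2 seconds, matching the heartbeat
 * comment in tg3_timer().
 */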
9218 init_timer(&tp->timer);
9219 tp->timer.expires = jiffies + tp->timer_offset;
9220 tp->timer.data = (unsigned long) tp;
9221 tp->timer.function = tg3_timer;
9224 tg3_full_unlock(tp);
9229 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9230 err = tg3_test_msi(tp);
9233 tg3_full_lock(tp, 0);
9234 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9236 tg3_full_unlock(tp);
9241 if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9242 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9243 u32 val = tr32(PCIE_TRANSACTION_CFG);
9245 tw32(PCIE_TRANSACTION_CFG,
9246 val | PCIE_TRANS_CFG_1SHOT_MSI);
9252 tg3_full_lock(tp, 0);
9254 add_timer(&tp->timer);
9255 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9256 tg3_enable_ints(tp);
9258 tg3_full_unlock(tp);
9260 netif_tx_start_all_queues(dev);
9265 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9266 struct tg3_napi *tnapi = &tp->napi[i];
9267 free_irq(tnapi->irq_vec, tnapi);
9271 tg3_napi_disable(tp);
9273 tg3_free_consistent(tp);
9280 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9281 struct rtnl_link_stats64 *);
9282 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9284 static int tg3_close(struct net_device *dev)
9287 struct tg3 *tp = netdev_priv(dev);
9289 tg3_napi_disable(tp);
9290 cancel_work_sync(&tp->reset_task);
9292 netif_tx_stop_all_queues(dev);
9294 del_timer_sync(&tp->timer);
9298 tg3_full_lock(tp, 1);
9300 tg3_disable_ints(tp);
9302 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9304 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9306 tg3_full_unlock(tp);
9308 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9309 struct tg3_napi *tnapi = &tp->napi[i];
9310 free_irq(tnapi->irq_vec, tnapi);
9315 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9317 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9318 sizeof(tp->estats_prev));
9322 tg3_free_consistent(tp);
9326 netif_carrier_off(tp->dev);
9331 static inline u64 get_stat64(tg3_stat64_t *val)
9333 return ((u64)val->high << 32) | ((u64)val->low);
9336 static u64 calc_crc_errors(struct tg3 *tp)
9338 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9340 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9341 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9342 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9345 spin_lock_bh(&tp->lock);
9346 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9347 tg3_writephy(tp, MII_TG3_TEST1,
9348 val | MII_TG3_TEST1_CRC_EN);
9349 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9352 spin_unlock_bh(&tp->lock);
9354 tp->phy_crc_errors += val;
9356 return tp->phy_crc_errors;
9359 return get_stat64(&hw_stats->rx_fcs_errors);
9362 #define ESTAT_ADD(member) \
9363 estats->member = old_estats->member + \
9364 get_stat64(&hw_stats->member)
9366 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9368 struct tg3_ethtool_stats *estats = &tp->estats;
9369 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9370 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9375 ESTAT_ADD(rx_octets);
9376 ESTAT_ADD(rx_fragments);
9377 ESTAT_ADD(rx_ucast_packets);
9378 ESTAT_ADD(rx_mcast_packets);
9379 ESTAT_ADD(rx_bcast_packets);
9380 ESTAT_ADD(rx_fcs_errors);
9381 ESTAT_ADD(rx_align_errors);
9382 ESTAT_ADD(rx_xon_pause_rcvd);
9383 ESTAT_ADD(rx_xoff_pause_rcvd);
9384 ESTAT_ADD(rx_mac_ctrl_rcvd);
9385 ESTAT_ADD(rx_xoff_entered);
9386 ESTAT_ADD(rx_frame_too_long_errors);
9387 ESTAT_ADD(rx_jabbers);
9388 ESTAT_ADD(rx_undersize_packets);
9389 ESTAT_ADD(rx_in_length_errors);
9390 ESTAT_ADD(rx_out_length_errors);
9391 ESTAT_ADD(rx_64_or_less_octet_packets);
9392 ESTAT_ADD(rx_65_to_127_octet_packets);
9393 ESTAT_ADD(rx_128_to_255_octet_packets);
9394 ESTAT_ADD(rx_256_to_511_octet_packets);
9395 ESTAT_ADD(rx_512_to_1023_octet_packets);
9396 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9397 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9398 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9399 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9400 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9402 ESTAT_ADD(tx_octets);
9403 ESTAT_ADD(tx_collisions);
9404 ESTAT_ADD(tx_xon_sent);
9405 ESTAT_ADD(tx_xoff_sent);
9406 ESTAT_ADD(tx_flow_control);
9407 ESTAT_ADD(tx_mac_errors);
9408 ESTAT_ADD(tx_single_collisions);
9409 ESTAT_ADD(tx_mult_collisions);
9410 ESTAT_ADD(tx_deferred);
9411 ESTAT_ADD(tx_excessive_collisions);
9412 ESTAT_ADD(tx_late_collisions);
9413 ESTAT_ADD(tx_collide_2times);
9414 ESTAT_ADD(tx_collide_3times);
9415 ESTAT_ADD(tx_collide_4times);
9416 ESTAT_ADD(tx_collide_5times);
9417 ESTAT_ADD(tx_collide_6times);
9418 ESTAT_ADD(tx_collide_7times);
9419 ESTAT_ADD(tx_collide_8times);
9420 ESTAT_ADD(tx_collide_9times);
9421 ESTAT_ADD(tx_collide_10times);
9422 ESTAT_ADD(tx_collide_11times);
9423 ESTAT_ADD(tx_collide_12times);
9424 ESTAT_ADD(tx_collide_13times);
9425 ESTAT_ADD(tx_collide_14times);
9426 ESTAT_ADD(tx_collide_15times);
9427 ESTAT_ADD(tx_ucast_packets);
9428 ESTAT_ADD(tx_mcast_packets);
9429 ESTAT_ADD(tx_bcast_packets);
9430 ESTAT_ADD(tx_carrier_sense_errors);
9431 ESTAT_ADD(tx_discards);
9432 ESTAT_ADD(tx_errors);
9434 ESTAT_ADD(dma_writeq_full);
9435 ESTAT_ADD(dma_write_prioq_full);
9436 ESTAT_ADD(rxbds_empty);
9437 ESTAT_ADD(rx_discards);
9438 ESTAT_ADD(rx_errors);
9439 ESTAT_ADD(rx_threshold_hit);
9441 ESTAT_ADD(dma_readq_full);
9442 ESTAT_ADD(dma_read_prioq_full);
9443 ESTAT_ADD(tx_comp_queue_full);
9445 ESTAT_ADD(ring_set_send_prod_index);
9446 ESTAT_ADD(ring_status_update);
9447 ESTAT_ADD(nic_irqs);
9448 ESTAT_ADD(nic_avoided_irqs);
9449 ESTAT_ADD(nic_tx_threshold_hit);
9454 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9455 struct rtnl_link_stats64 *stats)
9457 struct tg3 *tp = netdev_priv(dev);
9458 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9459 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9464 stats->rx_packets = old_stats->rx_packets +
9465 get_stat64(&hw_stats->rx_ucast_packets) +
9466 get_stat64(&hw_stats->rx_mcast_packets) +
9467 get_stat64(&hw_stats->rx_bcast_packets);
9469 stats->tx_packets = old_stats->tx_packets +
9470 get_stat64(&hw_stats->tx_ucast_packets) +
9471 get_stat64(&hw_stats->tx_mcast_packets) +
9472 get_stat64(&hw_stats->tx_bcast_packets);
9474 stats->rx_bytes = old_stats->rx_bytes +
9475 get_stat64(&hw_stats->rx_octets);
9476 stats->tx_bytes = old_stats->tx_bytes +
9477 get_stat64(&hw_stats->tx_octets);
9479 stats->rx_errors = old_stats->rx_errors +
9480 get_stat64(&hw_stats->rx_errors);
9481 stats->tx_errors = old_stats->tx_errors +
9482 get_stat64(&hw_stats->tx_errors) +
9483 get_stat64(&hw_stats->tx_mac_errors) +
9484 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9485 get_stat64(&hw_stats->tx_discards);
9487 stats->multicast = old_stats->multicast +
9488 get_stat64(&hw_stats->rx_mcast_packets);
9489 stats->collisions = old_stats->collisions +
9490 get_stat64(&hw_stats->tx_collisions);
9492 stats->rx_length_errors = old_stats->rx_length_errors +
9493 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9494 get_stat64(&hw_stats->rx_undersize_packets);
9496 stats->rx_over_errors = old_stats->rx_over_errors +
9497 get_stat64(&hw_stats->rxbds_empty);
9498 stats->rx_frame_errors = old_stats->rx_frame_errors +
9499 get_stat64(&hw_stats->rx_align_errors);
9500 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9501 get_stat64(&hw_stats->tx_discards);
9502 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9503 get_stat64(&hw_stats->tx_carrier_sense_errors);
9505 stats->rx_crc_errors = old_stats->rx_crc_errors +
9506 calc_crc_errors(tp);
9508 stats->rx_missed_errors = old_stats->rx_missed_errors +
9509 get_stat64(&hw_stats->rx_discards);
9511 stats->rx_dropped = tp->rx_dropped;
9516 static inline u32 calc_crc(unsigned char *buf, int len)
9524 for (j = 0; j < len; j++) {
9527 for (k = 0; k < 8; k++) {
9540 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9542 /* accept or reject all multicast frames */
9543 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9544 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9545 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9546 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9549 static void __tg3_set_rx_mode(struct net_device *dev)
9551 struct tg3 *tp = netdev_priv(dev);
9554 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9555 RX_MODE_KEEP_VLAN_TAG);
9557 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9558 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9561 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9562 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9565 if (dev->flags & IFF_PROMISC) {
9566 /* Promiscuous mode. */
9567 rx_mode |= RX_MODE_PROMISC;
9568 } else if (dev->flags & IFF_ALLMULTI) {
9569 /* Accept all multicast. */
9570 tg3_set_multi(tp, 1);
9571 } else if (netdev_mc_empty(dev)) {
9572 /* Reject all multicast. */
9573 tg3_set_multi(tp, 0);
9575 /* Accept one or more multicast(s). */
9576 struct netdev_hw_addr *ha;
9577 u32 mc_filter[4] = { 0, };
9582 netdev_for_each_mc_addr(ha, dev) {
9583 crc = calc_crc(ha->addr, ETH_ALEN);
9585 regidx = (bit & 0x60) >> 5;
9587 mc_filter[regidx] |= (1 << bit);
9590 tw32(MAC_HASH_REG_0, mc_filter[0]);
9591 tw32(MAC_HASH_REG_1, mc_filter[1]);
9592 tw32(MAC_HASH_REG_2, mc_filter[2]);
9593 tw32(MAC_HASH_REG_3, mc_filter[3]);
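/* Worked example (editorial), assuming the usual tg3 mapping in which the
 * filter bit comes from the inverted CRC (~crc & 0x7f): a value of 0x6d
 * (1101101b) gives regidx = (0x6d & 0x60) >> 5 = 3 and bit = 0x6d & 0x1f = 13,
 * so that multicast address sets bit 13 of MAC_HASH_REG_3 above.
 */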
9596 if (rx_mode != tp->rx_mode) {
9597 tp->rx_mode = rx_mode;
9598 tw32_f(MAC_RX_MODE, rx_mode);
9603 static void tg3_set_rx_mode(struct net_device *dev)
9605 struct tg3 *tp = netdev_priv(dev);
9607 if (!netif_running(dev))
9610 tg3_full_lock(tp, 0);
9611 __tg3_set_rx_mode(dev);
9612 tg3_full_unlock(tp);
9615 #define TG3_REGDUMP_LEN (32 * 1024)
9617 static int tg3_get_regs_len(struct net_device *dev)
9619 return TG3_REGDUMP_LEN;
9622 static void tg3_get_regs(struct net_device *dev,
9623 struct ethtool_regs *regs, void *_p)
9626 struct tg3 *tp = netdev_priv(dev);
9632 memset(p, 0, TG3_REGDUMP_LEN);
9634 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9637 tg3_full_lock(tp, 0);
9639 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9640 #define GET_REG32_LOOP(base, len) \
9641 do { p = (u32 *)(orig_p + (base)); \
9642 for (i = 0; i < len; i += 4) \
9643 __GET_REG32((base) + i); \
9645 #define GET_REG32_1(reg) \
9646 do { p = (u32 *)(orig_p + (reg)); \
9647 __GET_REG32((reg)); \
9650 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
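/* Editorial note: each GET_REG32_LOOP(base, len) above copies len / 4
 * consecutive 32-bit registers into the dump buffer at the same offset they
 * occupy in register space, so this first call captures 0xb0 / 4 = 44 words
 * of PCI/host configuration registers starting at TG3PCI_VENDOR.
 */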
9651 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9652 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9653 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9654 GET_REG32_1(SNDDATAC_MODE);
9655 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9656 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9657 GET_REG32_1(SNDBDC_MODE);
9658 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9659 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9660 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9661 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9662 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9663 GET_REG32_1(RCVDCC_MODE);
9664 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9665 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9666 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9667 GET_REG32_1(MBFREE_MODE);
9668 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9669 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9670 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9671 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9672 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9673 GET_REG32_1(RX_CPU_MODE);
9674 GET_REG32_1(RX_CPU_STATE);
9675 GET_REG32_1(RX_CPU_PGMCTR);
9676 GET_REG32_1(RX_CPU_HWBKPT);
9677 GET_REG32_1(TX_CPU_MODE);
9678 GET_REG32_1(TX_CPU_STATE);
9679 GET_REG32_1(TX_CPU_PGMCTR);
9680 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9681 GET_REG32_LOOP(FTQ_RESET, 0x120);
9682 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9683 GET_REG32_1(DMAC_MODE);
9684 GET_REG32_LOOP(GRC_MODE, 0x4c);
9685 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9686 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9689 #undef GET_REG32_LOOP
9692 tg3_full_unlock(tp);
9695 static int tg3_get_eeprom_len(struct net_device *dev)
9697 struct tg3 *tp = netdev_priv(dev);
9699 return tp->nvram_size;
9702 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9704 struct tg3 *tp = netdev_priv(dev);
9707 u32 i, offset, len, b_offset, b_count;
9710 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9713 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9716 offset = eeprom->offset;
9720 eeprom->magic = TG3_EEPROM_MAGIC;
9723 /* adjustments to start on required 4 byte boundary */
9724 b_offset = offset & 3;
9725 b_count = 4 - b_offset;
9726 if (b_count > len) {
9727 /* i.e. offset=1 len=2 */
9730 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9733 memcpy(data, ((char *)&val) + b_offset, b_count);
9736 eeprom->len += b_count;
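/* Worked example (editorial): for offset=1 len=2 (the case noted above),
 * b_offset = 1 and b_count is clamped from 3 down to 2; the aligned word at
 * offset 0 is read and bytes 1..2 of it are copied to the caller, leaving
 * len = 0 so the aligned loop and the trailing-byte block below do nothing.
 */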
9739 /* read bytes up to the last 4 byte boundary */
9740 pd = &data[eeprom->len];
9741 for (i = 0; i < (len - (len & 3)); i += 4) {
9742 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9747 memcpy(pd + i, &val, 4);
9752 /* read last bytes not ending on 4 byte boundary */
9753 pd = &data[eeprom->len];
9755 b_offset = offset + len - b_count;
9756 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9759 memcpy(pd, &val, b_count);
9760 eeprom->len += b_count;
9765 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9767 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9769 struct tg3 *tp = netdev_priv(dev);
9771 u32 offset, len, b_offset, odd_len;
9775 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9778 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9779 eeprom->magic != TG3_EEPROM_MAGIC)
9782 offset = eeprom->offset;
9785 if ((b_offset = (offset & 3))) {
9786 /* adjustments to start on required 4 byte boundary */
9787 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9798 /* adjustments to end on required 4 byte boundary */
9800 len = (len + 3) & ~3;
9801 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9807 if (b_offset || odd_len) {
9808 buf = kmalloc(len, GFP_KERNEL);
9812 memcpy(buf, &start, 4);
9814 memcpy(buf+len-4, &end, 4);
9815 memcpy(buf + b_offset, data, eeprom->len);
9818 ret = tg3_nvram_write_block(tp, offset, len, buf);
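/* Worked example (editorial): writing 6 bytes at offset 5 gives b_offset = 1,
 * so the word at offset 4 is read back and len/offset become 7/4; the odd
 * length then rounds len up to 8 and reads the word at offset 8.  After the
 * three memcpy()s the scratch buffer holds the caller's bytes at offsets
 * 5-10 plus the preserved bytes at offsets 4 and 11, and one aligned 8-byte
 * block is written at offset 4.
 */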
9826 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9828 struct tg3 *tp = netdev_priv(dev);
9830 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9831 struct phy_device *phydev;
9832 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9834 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9835 return phy_ethtool_gset(phydev, cmd);
9838 cmd->supported = (SUPPORTED_Autoneg);
9840 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9841 cmd->supported |= (SUPPORTED_1000baseT_Half |
9842 SUPPORTED_1000baseT_Full);
9844 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9845 cmd->supported |= (SUPPORTED_100baseT_Half |
9846 SUPPORTED_100baseT_Full |
9847 SUPPORTED_10baseT_Half |
9848 SUPPORTED_10baseT_Full |
9850 cmd->port = PORT_TP;
9852 cmd->supported |= SUPPORTED_FIBRE;
9853 cmd->port = PORT_FIBRE;
9856 cmd->advertising = tp->link_config.advertising;
9857 if (netif_running(dev)) {
9858 cmd->speed = tp->link_config.active_speed;
9859 cmd->duplex = tp->link_config.active_duplex;
9861 cmd->speed = SPEED_INVALID;
9862 cmd->duplex = DUPLEX_INVALID;
9864 cmd->phy_address = tp->phy_addr;
9865 cmd->transceiver = XCVR_INTERNAL;
9866 cmd->autoneg = tp->link_config.autoneg;
9872 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9874 struct tg3 *tp = netdev_priv(dev);
9876 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9877 struct phy_device *phydev;
9878 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9880 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9881 return phy_ethtool_sset(phydev, cmd);
9884 if (cmd->autoneg != AUTONEG_ENABLE &&
9885 cmd->autoneg != AUTONEG_DISABLE)
9888 if (cmd->autoneg == AUTONEG_DISABLE &&
9889 cmd->duplex != DUPLEX_FULL &&
9890 cmd->duplex != DUPLEX_HALF)
9893 if (cmd->autoneg == AUTONEG_ENABLE) {
9894 u32 mask = ADVERTISED_Autoneg |
9896 ADVERTISED_Asym_Pause;
9898 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9899 mask |= ADVERTISED_1000baseT_Half |
9900 ADVERTISED_1000baseT_Full;
9902 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9903 mask |= ADVERTISED_100baseT_Half |
9904 ADVERTISED_100baseT_Full |
9905 ADVERTISED_10baseT_Half |
9906 ADVERTISED_10baseT_Full |
9909 mask |= ADVERTISED_FIBRE;
9911 if (cmd->advertising & ~mask)
9914 mask &= (ADVERTISED_1000baseT_Half |
9915 ADVERTISED_1000baseT_Full |
9916 ADVERTISED_100baseT_Half |
9917 ADVERTISED_100baseT_Full |
9918 ADVERTISED_10baseT_Half |
9919 ADVERTISED_10baseT_Full);
9921 cmd->advertising &= mask;
9923 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9924 if (cmd->speed != SPEED_1000)
9927 if (cmd->duplex != DUPLEX_FULL)
9930 if (cmd->speed != SPEED_100 &&
9931 cmd->speed != SPEED_10)
9936 tg3_full_lock(tp, 0);
9938 tp->link_config.autoneg = cmd->autoneg;
9939 if (cmd->autoneg == AUTONEG_ENABLE) {
9940 tp->link_config.advertising = (cmd->advertising |
9941 ADVERTISED_Autoneg);
9942 tp->link_config.speed = SPEED_INVALID;
9943 tp->link_config.duplex = DUPLEX_INVALID;
9945 tp->link_config.advertising = 0;
9946 tp->link_config.speed = cmd->speed;
9947 tp->link_config.duplex = cmd->duplex;
9950 tp->link_config.orig_speed = tp->link_config.speed;
9951 tp->link_config.orig_duplex = tp->link_config.duplex;
9952 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9954 if (netif_running(dev))
9955 tg3_setup_phy(tp, 1);
9957 tg3_full_unlock(tp);
9962 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9964 struct tg3 *tp = netdev_priv(dev);
9966 strcpy(info->driver, DRV_MODULE_NAME);
9967 strcpy(info->version, DRV_MODULE_VERSION);
9968 strcpy(info->fw_version, tp->fw_ver);
9969 strcpy(info->bus_info, pci_name(tp->pdev));
9972 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9974 struct tg3 *tp = netdev_priv(dev);
9976 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9977 device_can_wakeup(&tp->pdev->dev))
9978 wol->supported = WAKE_MAGIC;
9982 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9983 device_can_wakeup(&tp->pdev->dev))
9984 wol->wolopts = WAKE_MAGIC;
9985 memset(&wol->sopass, 0, sizeof(wol->sopass));
9988 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9990 struct tg3 *tp = netdev_priv(dev);
9991 struct device *dp = &tp->pdev->dev;
9993 if (wol->wolopts & ~WAKE_MAGIC)
9995 if ((wol->wolopts & WAKE_MAGIC) &&
9996 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9999 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10001 spin_lock_bh(&tp->lock);
10002 if (device_may_wakeup(dp))
10003 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10005 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10006 spin_unlock_bh(&tp->lock);
10012 static u32 tg3_get_msglevel(struct net_device *dev)
10014 struct tg3 *tp = netdev_priv(dev);
10015 return tp->msg_enable;
10018 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10020 struct tg3 *tp = netdev_priv(dev);
10021 tp->msg_enable = value;
10024 static int tg3_set_tso(struct net_device *dev, u32 value)
10026 struct tg3 *tp = netdev_priv(dev);
10028 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
10033 if ((dev->features & NETIF_F_IPV6_CSUM) &&
10034 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
10035 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
10037 dev->features |= NETIF_F_TSO6;
10038 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
10039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10040 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
10041 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
10042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
10043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10044 dev->features |= NETIF_F_TSO_ECN;
10046 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
10048 return ethtool_op_set_tso(dev, value);
10051 static int tg3_nway_reset(struct net_device *dev)
10053 struct tg3 *tp = netdev_priv(dev);
10056 if (!netif_running(dev))
10059 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10062 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10063 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10065 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10069 spin_lock_bh(&tp->lock);
10071 tg3_readphy(tp, MII_BMCR, &bmcr);
10072 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10073 ((bmcr & BMCR_ANENABLE) ||
10074 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10075 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10079 spin_unlock_bh(&tp->lock);
10085 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10087 struct tg3 *tp = netdev_priv(dev);
10089 ering->rx_max_pending = tp->rx_std_ring_mask;
10090 ering->rx_mini_max_pending = 0;
10091 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10092 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10094 ering->rx_jumbo_max_pending = 0;
10096 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10098 ering->rx_pending = tp->rx_pending;
10099 ering->rx_mini_pending = 0;
10100 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10101 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10103 ering->rx_jumbo_pending = 0;
10105 ering->tx_pending = tp->napi[0].tx_pending;
10108 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10110 struct tg3 *tp = netdev_priv(dev);
10111 int i, irq_sync = 0, err = 0;
10113 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10114 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10115 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10116 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10117 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10118 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10121 if (netif_running(dev)) {
10123 tg3_netif_stop(tp);
10127 tg3_full_lock(tp, irq_sync);
10129 tp->rx_pending = ering->rx_pending;
10131 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10132 tp->rx_pending > 63)
10133 tp->rx_pending = 63;
10134 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10136 for (i = 0; i < tp->irq_max; i++)
10137 tp->napi[i].tx_pending = ering->tx_pending;
10139 if (netif_running(dev)) {
10140 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10141 err = tg3_restart_hw(tp, 1);
10143 tg3_netif_start(tp);
10146 tg3_full_unlock(tp);
10148 if (irq_sync && !err)
10154 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10156 struct tg3 *tp = netdev_priv(dev);
10158 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10160 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10161 epause->rx_pause = 1;
10163 epause->rx_pause = 0;
10165 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10166 epause->tx_pause = 1;
10168 epause->tx_pause = 0;
10171 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10173 struct tg3 *tp = netdev_priv(dev);
10176 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10178 struct phy_device *phydev;
10180 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10182 if (!(phydev->supported & SUPPORTED_Pause) ||
10183 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10184 (epause->rx_pause != epause->tx_pause)))
10187 tp->link_config.flowctrl = 0;
10188 if (epause->rx_pause) {
10189 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10191 if (epause->tx_pause) {
10192 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10193 newadv = ADVERTISED_Pause;
10195 newadv = ADVERTISED_Pause |
10196 ADVERTISED_Asym_Pause;
10197 } else if (epause->tx_pause) {
10198 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10199 newadv = ADVERTISED_Asym_Pause;
10203 if (epause->autoneg)
10204 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10206 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10208 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10209 u32 oldadv = phydev->advertising &
10210 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10211 if (oldadv != newadv) {
10212 phydev->advertising &=
10213 ~(ADVERTISED_Pause |
10214 ADVERTISED_Asym_Pause);
10215 phydev->advertising |= newadv;
10216 if (phydev->autoneg) {
10218 * Always renegotiate the link to
10219 * inform our link partner of our
10220 * flow control settings, even if the
10221 * flow control is forced. Let
10222 * tg3_adjust_link() do the final
10223 * flow control setup.
10225 return phy_start_aneg(phydev);
10229 if (!epause->autoneg)
10230 tg3_setup_flow_control(tp, 0, 0);
10232 tp->link_config.orig_advertising &=
10233 ~(ADVERTISED_Pause |
10234 ADVERTISED_Asym_Pause);
10235 tp->link_config.orig_advertising |= newadv;
10240 if (netif_running(dev)) {
10241 tg3_netif_stop(tp);
10245 tg3_full_lock(tp, irq_sync);
10247 if (epause->autoneg)
10248 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10250 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10251 if (epause->rx_pause)
10252 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10254 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10255 if (epause->tx_pause)
10256 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10258 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10260 if (netif_running(dev)) {
10261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10262 err = tg3_restart_hw(tp, 1);
10264 tg3_netif_start(tp);
10267 tg3_full_unlock(tp);
10273 static u32 tg3_get_rx_csum(struct net_device *dev)
10275 struct tg3 *tp = netdev_priv(dev);
10276 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10279 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10281 struct tg3 *tp = netdev_priv(dev);
10283 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10289 spin_lock_bh(&tp->lock);
10291 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10293 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10294 spin_unlock_bh(&tp->lock);
10299 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10301 struct tg3 *tp = netdev_priv(dev);
10303 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10309 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10310 ethtool_op_set_tx_ipv6_csum(dev, data);
10312 ethtool_op_set_tx_csum(dev, data);
10317 static int tg3_get_sset_count(struct net_device *dev, int sset)
10321 return TG3_NUM_TEST;
10323 return TG3_NUM_STATS;
10325 return -EOPNOTSUPP;
10329 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10331 switch (stringset) {
10333 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10336 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10339 WARN_ON(1); /* we need a WARN() */
10344 static int tg3_phys_id(struct net_device *dev, u32 data)
10346 struct tg3 *tp = netdev_priv(dev);
10349 if (!netif_running(tp->dev))
10353 data = UINT_MAX / 2;
10355 for (i = 0; i < (data * 2); i++) {
10357 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10358 LED_CTRL_1000MBPS_ON |
10359 LED_CTRL_100MBPS_ON |
10360 LED_CTRL_10MBPS_ON |
10361 LED_CTRL_TRAFFIC_OVERRIDE |
10362 LED_CTRL_TRAFFIC_BLINK |
10363 LED_CTRL_TRAFFIC_LED);
10366 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10367 LED_CTRL_TRAFFIC_OVERRIDE);
10369 if (msleep_interruptible(500))
10372 tw32(MAC_LED_CTRL, tp->led_ctrl);
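/* Worked example (editorial): an identify request of 4 seconds arrives here
 * as data == 4, so the loop above runs data * 2 = 8 half-cycles of roughly
 * 500 ms each, alternating the LED override between all-on and off, before
 * tp->led_ctrl is restored above; a data of 0 is bumped to UINT_MAX / 2
 * earlier, i.e. blink until the sleep is interrupted.
 */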
10376 static void tg3_get_ethtool_stats(struct net_device *dev,
10377 struct ethtool_stats *estats, u64 *tmp_stats)
10379 struct tg3 *tp = netdev_priv(dev);
10380 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10383 #define NVRAM_TEST_SIZE 0x100
10384 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10385 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10386 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10387 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10388 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10390 static int tg3_test_nvram(struct tg3 *tp)
10394 int i, j, k, err = 0, size;
10396 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10399 if (tg3_nvram_read(tp, 0, &magic) != 0)
10402 if (magic == TG3_EEPROM_MAGIC)
10403 size = NVRAM_TEST_SIZE;
10404 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10405 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10406 TG3_EEPROM_SB_FORMAT_1) {
10407 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10408 case TG3_EEPROM_SB_REVISION_0:
10409 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10411 case TG3_EEPROM_SB_REVISION_2:
10412 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10414 case TG3_EEPROM_SB_REVISION_3:
10415 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10422 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10423 size = NVRAM_SELFBOOT_HW_SIZE;
10427 buf = kmalloc(size, GFP_KERNEL);
10432 for (i = 0, j = 0; i < size; i += 4, j++) {
10433 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10440 /* Selfboot format */
10441 magic = be32_to_cpu(buf[0]);
10442 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10443 TG3_EEPROM_MAGIC_FW) {
10444 u8 *buf8 = (u8 *) buf, csum8 = 0;
10446 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10447 TG3_EEPROM_SB_REVISION_2) {
10448 /* For rev 2, the csum doesn't include the MBA. */
10449 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10451 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10454 for (i = 0; i < size; i++)
10467 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10468 TG3_EEPROM_MAGIC_HW) {
10469 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10470 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10471 u8 *buf8 = (u8 *) buf;
10473 /* Separate the parity bits and the data bytes. */
10474 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10475 if ((i == 0) || (i == 8)) {
10479 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10480 parity[k++] = buf8[i] & msk;
10482 } else if (i == 16) {
10486 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10487 parity[k++] = buf8[i] & msk;
10490 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10491 parity[k++] = buf8[i] & msk;
10494 data[j++] = buf8[i];
10498 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10499 u8 hw8 = hweight8(data[i]);
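/* Editorial note: the check below enforces odd parity over each
 * (data byte, parity bit) pair -- a data byte with an even number of set
 * bits must have its parity bit set, one with an odd count must have it
 * clear -- across the NVRAM_SELFBOOT_DATA_SIZE data bytes separated out
 * above.
 */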
10501 if ((hw8 & 0x1) && parity[i])
10503 else if (!(hw8 & 0x1) && !parity[i])
10512 /* Bootstrap checksum at offset 0x10 */
10513 csum = calc_crc((unsigned char *) buf, 0x10);
10514 if (csum != le32_to_cpu(buf[0x10/4]))
10517 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10518 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10519 if (csum != le32_to_cpu(buf[0xfc/4]))
10522 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
10523 /* The data is in little-endian format in NVRAM.
10524 * Use the big-endian read routines to preserve
10525 * the byte order as it exists in NVRAM.
10527 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
10531 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10532 PCI_VPD_LRDT_RO_DATA);
10534 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10538 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10541 i += PCI_VPD_LRDT_TAG_SIZE;
10542 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10543 PCI_VPD_RO_KEYWORD_CHKSUM);
10547 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10549 for (i = 0; i <= j; i++)
10550 csum8 += ((u8 *)buf)[i];
10564 #define TG3_SERDES_TIMEOUT_SEC 2
10565 #define TG3_COPPER_TIMEOUT_SEC 6
10567 static int tg3_test_link(struct tg3 *tp)
10571 if (!netif_running(tp->dev))
10574 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10575 max = TG3_SERDES_TIMEOUT_SEC;
10577 max = TG3_COPPER_TIMEOUT_SEC;
10579 for (i = 0; i < max; i++) {
10580 if (netif_carrier_ok(tp->dev))
10583 if (msleep_interruptible(1000))
10590 /* Only test the commonly used registers */
10591 static int tg3_test_registers(struct tg3 *tp)
10593 int i, is_5705, is_5750;
10594 u32 offset, read_mask, write_mask, val, save_val, read_val;
10598 #define TG3_FL_5705 0x1
10599 #define TG3_FL_NOT_5705 0x2
10600 #define TG3_FL_NOT_5788 0x4
10601 #define TG3_FL_NOT_5750 0x8
10605 /* MAC Control Registers */
10606 { MAC_MODE, TG3_FL_NOT_5705,
10607 0x00000000, 0x00ef6f8c },
10608 { MAC_MODE, TG3_FL_5705,
10609 0x00000000, 0x01ef6b8c },
10610 { MAC_STATUS, TG3_FL_NOT_5705,
10611 0x03800107, 0x00000000 },
10612 { MAC_STATUS, TG3_FL_5705,
10613 0x03800100, 0x00000000 },
10614 { MAC_ADDR_0_HIGH, 0x0000,
10615 0x00000000, 0x0000ffff },
10616 { MAC_ADDR_0_LOW, 0x0000,
10617 0x00000000, 0xffffffff },
10618 { MAC_RX_MTU_SIZE, 0x0000,
10619 0x00000000, 0x0000ffff },
10620 { MAC_TX_MODE, 0x0000,
10621 0x00000000, 0x00000070 },
10622 { MAC_TX_LENGTHS, 0x0000,
10623 0x00000000, 0x00003fff },
10624 { MAC_RX_MODE, TG3_FL_NOT_5705,
10625 0x00000000, 0x000007fc },
10626 { MAC_RX_MODE, TG3_FL_5705,
10627 0x00000000, 0x000007dc },
10628 { MAC_HASH_REG_0, 0x0000,
10629 0x00000000, 0xffffffff },
10630 { MAC_HASH_REG_1, 0x0000,
10631 0x00000000, 0xffffffff },
10632 { MAC_HASH_REG_2, 0x0000,
10633 0x00000000, 0xffffffff },
10634 { MAC_HASH_REG_3, 0x0000,
10635 0x00000000, 0xffffffff },
10637 /* Receive Data and Receive BD Initiator Control Registers. */
10638 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10639 0x00000000, 0xffffffff },
10640 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10641 0x00000000, 0xffffffff },
10642 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10643 0x00000000, 0x00000003 },
10644 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10645 0x00000000, 0xffffffff },
10646 { RCVDBDI_STD_BD+0, 0x0000,
10647 0x00000000, 0xffffffff },
10648 { RCVDBDI_STD_BD+4, 0x0000,
10649 0x00000000, 0xffffffff },
10650 { RCVDBDI_STD_BD+8, 0x0000,
10651 0x00000000, 0xffff0002 },
10652 { RCVDBDI_STD_BD+0xc, 0x0000,
10653 0x00000000, 0xffffffff },
10655 /* Receive BD Initiator Control Registers. */
10656 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10657 0x00000000, 0xffffffff },
10658 { RCVBDI_STD_THRESH, TG3_FL_5705,
10659 0x00000000, 0x000003ff },
10660 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10661 0x00000000, 0xffffffff },
10663 /* Host Coalescing Control Registers. */
10664 { HOSTCC_MODE, TG3_FL_NOT_5705,
10665 0x00000000, 0x00000004 },
10666 { HOSTCC_MODE, TG3_FL_5705,
10667 0x00000000, 0x000000f6 },
10668 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10669 0x00000000, 0xffffffff },
10670 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10671 0x00000000, 0x000003ff },
10672 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10673 0x00000000, 0xffffffff },
10674 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10675 0x00000000, 0x000003ff },
10676 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10677 0x00000000, 0xffffffff },
10678 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10679 0x00000000, 0x000000ff },
10680 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10681 0x00000000, 0xffffffff },
10682 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10683 0x00000000, 0x000000ff },
10684 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10685 0x00000000, 0xffffffff },
10686 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10687 0x00000000, 0xffffffff },
10688 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10689 0x00000000, 0xffffffff },
10690 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10691 0x00000000, 0x000000ff },
10692 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10693 0x00000000, 0xffffffff },
10694 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10695 0x00000000, 0x000000ff },
10696 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10697 0x00000000, 0xffffffff },
10698 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10699 0x00000000, 0xffffffff },
10700 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10701 0x00000000, 0xffffffff },
10702 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10703 0x00000000, 0xffffffff },
10704 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10705 0x00000000, 0xffffffff },
10706 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10707 0xffffffff, 0x00000000 },
10708 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10709 0xffffffff, 0x00000000 },
10711 /* Buffer Manager Control Registers. */
10712 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10713 0x00000000, 0x007fff80 },
10714 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10715 0x00000000, 0x007fffff },
10716 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10717 0x00000000, 0x0000003f },
10718 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10719 0x00000000, 0x000001ff },
10720 { BUFMGR_MB_HIGH_WATER, 0x0000,
10721 0x00000000, 0x000001ff },
10722 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10723 0xffffffff, 0x00000000 },
10724 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10725 0xffffffff, 0x00000000 },
10727 /* Mailbox Registers */
10728 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10729 0x00000000, 0x000001ff },
10730 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10731 0x00000000, 0x000001ff },
10732 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10733 0x00000000, 0x000007ff },
10734 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10735 0x00000000, 0x000001ff },
10737 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10740 is_5705 = is_5750 = 0;
10741 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10743 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10747 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10748 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10751 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10754 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10755 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10758 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10761 offset = (u32) reg_tbl[i].offset;
10762 read_mask = reg_tbl[i].read_mask;
10763 write_mask = reg_tbl[i].write_mask;
10765 /* Save the original register content */
10766 save_val = tr32(offset);
10768 /* Determine the read-only value. */
10769 read_val = save_val & read_mask;
10771 /* Write zero to the register, then make sure the read-only bits
10772 * are not changed and the read/write bits are all zeros.
10776 val = tr32(offset);
10778 /* Test the read-only and read/write bits. */
10779 if (((val & read_mask) != read_val) || (val & write_mask))
10782 /* Write ones to all the bits defined by RdMask and WrMask, then
10783 * make sure the read-only bits are not changed and the
10784 * read/write bits are all ones.
10786 tw32(offset, read_mask | write_mask);
10788 val = tr32(offset);
10790 /* Test the read-only bits. */
10791 if ((val & read_mask) != read_val)
10794 /* Test the read/write bits. */
10795 if ((val & write_mask) != write_mask)
10798 tw32(offset, save_val);
10804 if (netif_msg_hw(tp))
10805 netdev_err(tp->dev,
10806 "Register test failed at offset %x\n", offset);
10807 tw32(offset, save_val);
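/* Worked example (editorial): for the MAC_RX_MTU_SIZE entry above
 * (read_mask 0x00000000, write_mask 0x0000ffff) the first pass writes 0 and
 * requires the low 16 bits to read back as 0, and the second pass writes
 * 0x0000ffff and requires all 16 writable bits to stick; for a read-only
 * entry such as MAC_STATUS (write_mask 0) the two passes only verify that
 * the bits under read_mask come back unchanged.
 */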
10811 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10813 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10817 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10818 for (j = 0; j < len; j += 4) {
10821 tg3_write_mem(tp, offset + j, test_pattern[i]);
10822 tg3_read_mem(tp, offset + j, &val);
10823 if (val != test_pattern[i])
10830 static int tg3_test_memory(struct tg3 *tp)
10832 static struct mem_entry {
10835 } mem_tbl_570x[] = {
10836 { 0x00000000, 0x00b50},
10837 { 0x00002000, 0x1c000},
10838 { 0xffffffff, 0x00000}
10839 }, mem_tbl_5705[] = {
10840 { 0x00000100, 0x0000c},
10841 { 0x00000200, 0x00008},
10842 { 0x00004000, 0x00800},
10843 { 0x00006000, 0x01000},
10844 { 0x00008000, 0x02000},
10845 { 0x00010000, 0x0e000},
10846 { 0xffffffff, 0x00000}
10847 }, mem_tbl_5755[] = {
10848 { 0x00000200, 0x00008},
10849 { 0x00004000, 0x00800},
10850 { 0x00006000, 0x00800},
10851 { 0x00008000, 0x02000},
10852 { 0x00010000, 0x0c000},
10853 { 0xffffffff, 0x00000}
10854 }, mem_tbl_5906[] = {
10855 { 0x00000200, 0x00008},
10856 { 0x00004000, 0x00400},
10857 { 0x00006000, 0x00400},
10858 { 0x00008000, 0x01000},
10859 { 0x00010000, 0x01000},
10860 { 0xffffffff, 0x00000}
10861 }, mem_tbl_5717[] = {
10862 { 0x00000200, 0x00008},
10863 { 0x00010000, 0x0a000},
10864 { 0x00020000, 0x13c00},
10865 { 0xffffffff, 0x00000}
10866 }, mem_tbl_57765[] = {
10867 { 0x00000200, 0x00008},
10868 { 0x00004000, 0x00800},
10869 { 0x00006000, 0x09800},
10870 { 0x00010000, 0x0a000},
10871 { 0xffffffff, 0x00000}
10873 struct mem_entry *mem_tbl;
10877 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
10878 mem_tbl = mem_tbl_5717;
10879 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10880 mem_tbl = mem_tbl_57765;
10881 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10882 mem_tbl = mem_tbl_5755;
10883 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10884 mem_tbl = mem_tbl_5906;
10885 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10886 mem_tbl = mem_tbl_5705;
10888 mem_tbl = mem_tbl_570x;
10890 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10891 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10899 #define TG3_MAC_LOOPBACK 0
10900 #define TG3_PHY_LOOPBACK 1
10902 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10904 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10905 u32 desc_idx, coal_now;
10906 struct sk_buff *skb, *rx_skb;
10909 int num_pkts, tx_len, rx_len, i, err;
10910 struct tg3_rx_buffer_desc *desc;
10911 struct tg3_napi *tnapi, *rnapi;
10912 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10914 tnapi = &tp->napi[0];
10915 rnapi = &tp->napi[0];
10916 if (tp->irq_cnt > 1) {
10917 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10918 rnapi = &tp->napi[1];
10919 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10920 tnapi = &tp->napi[1];
10922 coal_now = tnapi->coal_now | rnapi->coal_now;
10924 if (loopback_mode == TG3_MAC_LOOPBACK) {
10925 /* HW errata - mac loopback fails in some cases on 5780.
10926 * Normal traffic and PHY loopback are not affected by
10927 * errata. Also, the MAC loopback test is deprecated for
10928 * all newer ASIC revisions.
10930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10931 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10934 mac_mode = tp->mac_mode &
10935 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10936 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10937 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10938 mac_mode |= MAC_MODE_LINK_POLARITY;
10939 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10940 mac_mode |= MAC_MODE_PORT_MODE_MII;
10942 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10943 tw32(MAC_MODE, mac_mode);
10944 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10947 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10948 tg3_phy_fet_toggle_apd(tp, false);
10949 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10951 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10953 tg3_phy_toggle_automdix(tp, 0);
10955 tg3_writephy(tp, MII_BMCR, val);
10958 mac_mode = tp->mac_mode &
10959 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10960 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10961 tg3_writephy(tp, MII_TG3_FET_PTEST,
10962 MII_TG3_FET_PTEST_FRC_TX_LINK |
10963 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10964 /* The write needs to be flushed for the AC131 */
10965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10966 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10967 mac_mode |= MAC_MODE_PORT_MODE_MII;
10969 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10971 /* reset to prevent losing 1st rx packet intermittently */
10972 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10973 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10975 tw32_f(MAC_RX_MODE, tp->rx_mode);
10977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10978 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10979 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10980 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10981 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10982 mac_mode |= MAC_MODE_LINK_POLARITY;
10983 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10984 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10986 tw32(MAC_MODE, mac_mode);
10988 /* Wait for link */
10989 for (i = 0; i < 100; i++) {
10990 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11001 skb = netdev_alloc_skb(tp->dev, tx_len);
11005 tx_data = skb_put(skb, tx_len);
11006 memcpy(tx_data, tp->dev->dev_addr, 6);
11007 memset(tx_data + 6, 0x0, 8);
11009 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
11011 for (i = 14; i < tx_len; i++)
11012 tx_data[i] = (u8) (i & 0xff);
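/* Editorial note on the test frame built above: bytes 0-5 carry the device's
 * own MAC address as the destination, bytes 6-13 (the rest of the 14-byte
 * Ethernet header) are zeroed, and the payload from byte 14 to tx_len - 1 is
 * an incrementing pattern that the receive side compares byte-for-byte after
 * the loopback.
 */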
11014 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11015 if (pci_dma_mapping_error(tp->pdev, map)) {
11016 dev_kfree_skb(skb);
11020 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11025 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11029 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
11034 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11035 tr32_mailbox(tnapi->prodmbox);
11039 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11040 for (i = 0; i < 35; i++) {
11041 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11046 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11047 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11048 if ((tx_idx == tnapi->tx_prod) &&
11049 (rx_idx == (rx_start_idx + num_pkts)))
11053 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11054 dev_kfree_skb(skb);
11056 if (tx_idx != tnapi->tx_prod)
11059 if (rx_idx != rx_start_idx + num_pkts)
11062 desc = &rnapi->rx_rcb[rx_start_idx];
11063 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11064 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11065 if (opaque_key != RXD_OPAQUE_RING_STD)
11068 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11069 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11072 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
11073 if (rx_len != tx_len)
11076 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11078 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
11079 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
11081 for (i = 14; i < tx_len; i++) {
11082 if (*(rx_skb->data + i) != (u8) (i & 0xff))
11087 /* tg3_free_rings will unmap and free the rx_skb */
11092 #define TG3_MAC_LOOPBACK_FAILED 1
11093 #define TG3_PHY_LOOPBACK_FAILED 2
11094 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
11095 TG3_PHY_LOOPBACK_FAILED)
11097 static int tg3_test_loopback(struct tg3 *tp)
11100 u32 eee_cap, cpmuctrl = 0;
11102 if (!netif_running(tp->dev))
11103 return TG3_LOOPBACK_FAILED;
11105 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11106 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11108 err = tg3_reset_hw(tp, 1);
11110 err = TG3_LOOPBACK_FAILED;
11114 /* Turn off gphy autopowerdown. */
11115 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11116 tg3_phy_toggle_apd(tp, false);
11118 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11122 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11124 /* Wait for up to 40 microseconds to acquire lock. */
11125 for (i = 0; i < 4; i++) {
11126 status = tr32(TG3_CPMU_MUTEX_GNT);
11127 if (status == CPMU_MUTEX_GNT_DRIVER)
11132 if (status != CPMU_MUTEX_GNT_DRIVER) {
11133 err = TG3_LOOPBACK_FAILED;
11137 /* Turn off link-based power management. */
11138 cpmuctrl = tr32(TG3_CPMU_CTRL);
11139 tw32(TG3_CPMU_CTRL,
11140 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11141 CPMU_CTRL_LINK_AWARE_MODE));
11144 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
11145 err |= TG3_MAC_LOOPBACK_FAILED;
11147 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11148 tw32(TG3_CPMU_CTRL, cpmuctrl);
11150 /* Release the mutex */
11151 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11154 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11155 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11156 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
11157 err |= TG3_PHY_LOOPBACK_FAILED;
11160 /* Re-enable gphy autopowerdown. */
11161 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11162 tg3_phy_toggle_apd(tp, true);
11165 tp->phy_flags |= eee_cap;
11170 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11173 struct tg3 *tp = netdev_priv(dev);
11175 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11178 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11180 if (tg3_test_nvram(tp) != 0) {
11181 etest->flags |= ETH_TEST_FL_FAILED;
11184 if (tg3_test_link(tp) != 0) {
11185 etest->flags |= ETH_TEST_FL_FAILED;
11188 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11189 int err, err2 = 0, irq_sync = 0;
11191 if (netif_running(dev)) {
11193 tg3_netif_stop(tp);
11197 tg3_full_lock(tp, irq_sync);
11199 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11200 err = tg3_nvram_lock(tp);
11201 tg3_halt_cpu(tp, RX_CPU_BASE);
11202 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11203 tg3_halt_cpu(tp, TX_CPU_BASE);
11205 tg3_nvram_unlock(tp);
11207 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11210 if (tg3_test_registers(tp) != 0) {
11211 etest->flags |= ETH_TEST_FL_FAILED;
11214 if (tg3_test_memory(tp) != 0) {
11215 etest->flags |= ETH_TEST_FL_FAILED;
11218 if ((data[4] = tg3_test_loopback(tp)) != 0)
11219 etest->flags |= ETH_TEST_FL_FAILED;
11221 tg3_full_unlock(tp);
11223 if (tg3_test_interrupt(tp) != 0) {
11224 etest->flags |= ETH_TEST_FL_FAILED;
11228 tg3_full_lock(tp, 0);
11230 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11231 if (netif_running(dev)) {
11232 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11233 err2 = tg3_restart_hw(tp, 1);
11235 tg3_netif_start(tp);
11238 tg3_full_unlock(tp);
11240 if (irq_sync && !err2)
11243 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11244 tg3_power_down(tp);
11248 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11250 struct mii_ioctl_data *data = if_mii(ifr);
11251 struct tg3 *tp = netdev_priv(dev);
11254 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11255 struct phy_device *phydev;
11256 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11258 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11259 return phy_mii_ioctl(phydev, ifr, cmd);
11264 data->phy_id = tp->phy_addr;
11267 case SIOCGMIIREG: {
11270 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11271 break; /* We have no PHY */
11273 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11274 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11275 !netif_running(dev)))
11278 spin_lock_bh(&tp->lock);
11279 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11280 spin_unlock_bh(&tp->lock);
11282 data->val_out = mii_regval;
11288 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11289 break; /* We have no PHY */
11291 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11292 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11293 !netif_running(dev)))
11296 spin_lock_bh(&tp->lock);
11297 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11298 spin_unlock_bh(&tp->lock);
11306 return -EOPNOTSUPP;
11309 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11311 struct tg3 *tp = netdev_priv(dev);
11313 memcpy(ec, &tp->coal, sizeof(*ec));
11317 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11319 struct tg3 *tp = netdev_priv(dev);
11320 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11321 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11323 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11324 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11325 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11326 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11327 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11330 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11331 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11332 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11333 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11334 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11335 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11336 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11337 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11338 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11339 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11342 /* No rx interrupts will be generated if both are zero */
11343 if ((ec->rx_coalesce_usecs == 0) &&
11344 (ec->rx_max_coalesced_frames == 0))
11347 /* No tx interrupts will be generated if both are zero */
11348 if ((ec->tx_coalesce_usecs == 0) &&
11349 (ec->tx_max_coalesced_frames == 0))
11352 /* Only copy relevant parameters, ignore all others. */
11353 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11354 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11355 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11356 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11357 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11358 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11359 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11360 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11361 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11363 if (netif_running(dev)) {
11364 tg3_full_lock(tp, 0);
11365 __tg3_set_coalesce(tp, &tp->coal);
11366 tg3_full_unlock(tp);
11371 static const struct ethtool_ops tg3_ethtool_ops = {
11372 .get_settings = tg3_get_settings,
11373 .set_settings = tg3_set_settings,
11374 .get_drvinfo = tg3_get_drvinfo,
11375 .get_regs_len = tg3_get_regs_len,
11376 .get_regs = tg3_get_regs,
11377 .get_wol = tg3_get_wol,
11378 .set_wol = tg3_set_wol,
11379 .get_msglevel = tg3_get_msglevel,
11380 .set_msglevel = tg3_set_msglevel,
11381 .nway_reset = tg3_nway_reset,
11382 .get_link = ethtool_op_get_link,
11383 .get_eeprom_len = tg3_get_eeprom_len,
11384 .get_eeprom = tg3_get_eeprom,
11385 .set_eeprom = tg3_set_eeprom,
11386 .get_ringparam = tg3_get_ringparam,
11387 .set_ringparam = tg3_set_ringparam,
11388 .get_pauseparam = tg3_get_pauseparam,
11389 .set_pauseparam = tg3_set_pauseparam,
11390 .get_rx_csum = tg3_get_rx_csum,
11391 .set_rx_csum = tg3_set_rx_csum,
11392 .set_tx_csum = tg3_set_tx_csum,
11393 .set_sg = ethtool_op_set_sg,
11394 .set_tso = tg3_set_tso,
11395 .self_test = tg3_self_test,
11396 .get_strings = tg3_get_strings,
11397 .phys_id = tg3_phys_id,
11398 .get_ethtool_stats = tg3_get_ethtool_stats,
11399 .get_coalesce = tg3_get_coalesce,
11400 .set_coalesce = tg3_set_coalesce,
11401 .get_sset_count = tg3_get_sset_count,
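/* tg3_get_eeprom_size() sizes small EEPROM parts by probing: it reads
 * back words at offsets that increase in powers of two until the magic
 * signature stored at offset 0 shows up again, which means the address
 * lines have wrapped.  For example, if the signature reappears at
 * offset 0x20000, the part is 128KB.
 */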
11404 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11406 u32 cursize, val, magic;
11408 tp->nvram_size = EEPROM_CHIP_SIZE;
11410 if (tg3_nvram_read(tp, 0, &magic) != 0)
11413 if ((magic != TG3_EEPROM_MAGIC) &&
11414 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11415 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11419 * Size the chip by reading offsets at increasing powers of two.
11420 * When we encounter our validation signature, we know the addressing
11421 * has wrapped around, and thus have our chip size.
11425 while (cursize < tp->nvram_size) {
11426 if (tg3_nvram_read(tp, cursize, &val) != 0)
11435 tp->nvram_size = cursize;
11438 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11442 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11443 tg3_nvram_read(tp, 0, &val) != 0)
11446 /* Selfboot format */
11447 if (val != TG3_EEPROM_MAGIC) {
11448 tg3_get_eeprom_size(tp);
11452 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11454 /* This is confusing. We want to operate on the
11455 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11456 * call will read from NVRAM and byteswap the data
11457 * according to the byteswapping settings for all
11458 * other register accesses. This ensures the data we
11459 * want will always reside in the lower 16-bits.
11460 * However, the data in NVRAM is in LE format, which
11461 * means the data from the NVRAM read will always be
11462 * opposite the endianness of the CPU. The 16-bit
11463 * byteswap then brings the data to CPU endianness.
11465 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11469 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
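/* A worked example of the selfboot sizing above: the low 16 bits of
 * the word read at offset 0xf0 are byte-swapped to CPU order and give
 * the image size in KB (the code multiplies by 1024), so a swabbed
 * value of 512 means a 512KB part.  If no usable size is found, the
 * driver falls back to TG3_NVRAM_SIZE_512KB.
 */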
11472 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11476 nvcfg1 = tr32(NVRAM_CFG1);
11477 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11478 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11480 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11481 tw32(NVRAM_CFG1, nvcfg1);
11484 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11485 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11486 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11487 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11488 tp->nvram_jedecnum = JEDEC_ATMEL;
11489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11490 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11492 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11493 tp->nvram_jedecnum = JEDEC_ATMEL;
11494 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11496 case FLASH_VENDOR_ATMEL_EEPROM:
11497 tp->nvram_jedecnum = JEDEC_ATMEL;
11498 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11499 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11501 case FLASH_VENDOR_ST:
11502 tp->nvram_jedecnum = JEDEC_ST;
11503 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11504 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11506 case FLASH_VENDOR_SAIFUN:
11507 tp->nvram_jedecnum = JEDEC_SAIFUN;
11508 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11510 case FLASH_VENDOR_SST_SMALL:
11511 case FLASH_VENDOR_SST_LARGE:
11512 tp->nvram_jedecnum = JEDEC_SST;
11513 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11517 tp->nvram_jedecnum = JEDEC_ATMEL;
11518 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11519 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11523 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11525 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11526 case FLASH_5752PAGE_SIZE_256:
11527 tp->nvram_pagesize = 256;
11529 case FLASH_5752PAGE_SIZE_512:
11530 tp->nvram_pagesize = 512;
11532 case FLASH_5752PAGE_SIZE_1K:
11533 tp->nvram_pagesize = 1024;
11535 case FLASH_5752PAGE_SIZE_2K:
11536 tp->nvram_pagesize = 2048;
11538 case FLASH_5752PAGE_SIZE_4K:
11539 tp->nvram_pagesize = 4096;
11541 case FLASH_5752PAGE_SIZE_264:
11542 tp->nvram_pagesize = 264;
11544 case FLASH_5752PAGE_SIZE_528:
11545 tp->nvram_pagesize = 528;
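/* Each tg3_get_*_nvram_info() routine below decodes NVRAM_CFG1 for one
 * ASIC family.  They record the flash vendor (tp->nvram_jedecnum),
 * whether the part is buffered and/or a true flash device (the
 * NVRAM_BUFFERED and FLASH flags), the page size and, where the strap
 * encodes it, the total NVRAM size.
 */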
11550 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11554 nvcfg1 = tr32(NVRAM_CFG1);
11556 /* NVRAM protection for TPM */
11557 if (nvcfg1 & (1 << 27))
11558 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11560 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11561 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11562 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11563 tp->nvram_jedecnum = JEDEC_ATMEL;
11564 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11566 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11567 tp->nvram_jedecnum = JEDEC_ATMEL;
11568 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11569 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11571 case FLASH_5752VENDOR_ST_M45PE10:
11572 case FLASH_5752VENDOR_ST_M45PE20:
11573 case FLASH_5752VENDOR_ST_M45PE40:
11574 tp->nvram_jedecnum = JEDEC_ST;
11575 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11576 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11580 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11581 tg3_nvram_get_pagesize(tp, nvcfg1);
11583 /* For eeprom, set pagesize to maximum eeprom size */
11584 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11586 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11587 tw32(NVRAM_CFG1, nvcfg1);
11591 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11593 u32 nvcfg1, protect = 0;
11595 nvcfg1 = tr32(NVRAM_CFG1);
11597 /* NVRAM protection for TPM */
11598 if (nvcfg1 & (1 << 27)) {
11599 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11603 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11605 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11606 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11607 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11608 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11609 tp->nvram_jedecnum = JEDEC_ATMEL;
11610 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11611 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11612 tp->nvram_pagesize = 264;
11613 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11614 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11615 tp->nvram_size = (protect ? 0x3e200 :
11616 TG3_NVRAM_SIZE_512KB);
11617 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11618 tp->nvram_size = (protect ? 0x1f200 :
11619 TG3_NVRAM_SIZE_256KB);
11621 tp->nvram_size = (protect ? 0x1f200 :
11622 TG3_NVRAM_SIZE_128KB);
11624 case FLASH_5752VENDOR_ST_M45PE10:
11625 case FLASH_5752VENDOR_ST_M45PE20:
11626 case FLASH_5752VENDOR_ST_M45PE40:
11627 tp->nvram_jedecnum = JEDEC_ST;
11628 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11629 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11630 tp->nvram_pagesize = 256;
11631 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11632 tp->nvram_size = (protect ?
11633 TG3_NVRAM_SIZE_64KB :
11634 TG3_NVRAM_SIZE_128KB);
11635 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11636 tp->nvram_size = (protect ?
11637 TG3_NVRAM_SIZE_64KB :
11638 TG3_NVRAM_SIZE_256KB);
11640 tp->nvram_size = (protect ?
11641 TG3_NVRAM_SIZE_128KB :
11642 TG3_NVRAM_SIZE_512KB);
11647 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11651 nvcfg1 = tr32(NVRAM_CFG1);
11653 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11654 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11655 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11656 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11657 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11658 tp->nvram_jedecnum = JEDEC_ATMEL;
11659 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11660 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11662 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11663 tw32(NVRAM_CFG1, nvcfg1);
11665 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11666 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11667 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11668 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11669 tp->nvram_jedecnum = JEDEC_ATMEL;
11670 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11671 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11672 tp->nvram_pagesize = 264;
11674 case FLASH_5752VENDOR_ST_M45PE10:
11675 case FLASH_5752VENDOR_ST_M45PE20:
11676 case FLASH_5752VENDOR_ST_M45PE40:
11677 tp->nvram_jedecnum = JEDEC_ST;
11678 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11679 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11680 tp->nvram_pagesize = 256;
11685 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11687 u32 nvcfg1, protect = 0;
11689 nvcfg1 = tr32(NVRAM_CFG1);
11691 /* NVRAM protection for TPM */
11692 if (nvcfg1 & (1 << 27)) {
11693 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11697 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11699 case FLASH_5761VENDOR_ATMEL_ADB021D:
11700 case FLASH_5761VENDOR_ATMEL_ADB041D:
11701 case FLASH_5761VENDOR_ATMEL_ADB081D:
11702 case FLASH_5761VENDOR_ATMEL_ADB161D:
11703 case FLASH_5761VENDOR_ATMEL_MDB021D:
11704 case FLASH_5761VENDOR_ATMEL_MDB041D:
11705 case FLASH_5761VENDOR_ATMEL_MDB081D:
11706 case FLASH_5761VENDOR_ATMEL_MDB161D:
11707 tp->nvram_jedecnum = JEDEC_ATMEL;
11708 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11709 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11710 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11711 tp->nvram_pagesize = 256;
11713 case FLASH_5761VENDOR_ST_A_M45PE20:
11714 case FLASH_5761VENDOR_ST_A_M45PE40:
11715 case FLASH_5761VENDOR_ST_A_M45PE80:
11716 case FLASH_5761VENDOR_ST_A_M45PE16:
11717 case FLASH_5761VENDOR_ST_M_M45PE20:
11718 case FLASH_5761VENDOR_ST_M_M45PE40:
11719 case FLASH_5761VENDOR_ST_M_M45PE80:
11720 case FLASH_5761VENDOR_ST_M_M45PE16:
11721 tp->nvram_jedecnum = JEDEC_ST;
11722 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11723 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11724 tp->nvram_pagesize = 256;
11729 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11732 case FLASH_5761VENDOR_ATMEL_ADB161D:
11733 case FLASH_5761VENDOR_ATMEL_MDB161D:
11734 case FLASH_5761VENDOR_ST_A_M45PE16:
11735 case FLASH_5761VENDOR_ST_M_M45PE16:
11736 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11738 case FLASH_5761VENDOR_ATMEL_ADB081D:
11739 case FLASH_5761VENDOR_ATMEL_MDB081D:
11740 case FLASH_5761VENDOR_ST_A_M45PE80:
11741 case FLASH_5761VENDOR_ST_M_M45PE80:
11742 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11744 case FLASH_5761VENDOR_ATMEL_ADB041D:
11745 case FLASH_5761VENDOR_ATMEL_MDB041D:
11746 case FLASH_5761VENDOR_ST_A_M45PE40:
11747 case FLASH_5761VENDOR_ST_M_M45PE40:
11748 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11750 case FLASH_5761VENDOR_ATMEL_ADB021D:
11751 case FLASH_5761VENDOR_ATMEL_MDB021D:
11752 case FLASH_5761VENDOR_ST_A_M45PE20:
11753 case FLASH_5761VENDOR_ST_M_M45PE20:
11754 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11760 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11762 tp->nvram_jedecnum = JEDEC_ATMEL;
11763 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11764 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11767 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11771 nvcfg1 = tr32(NVRAM_CFG1);
11773 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11774 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11775 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11776 tp->nvram_jedecnum = JEDEC_ATMEL;
11777 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11778 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11780 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11781 tw32(NVRAM_CFG1, nvcfg1);
11783 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11784 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11785 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11786 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11787 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11788 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11789 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11790 tp->nvram_jedecnum = JEDEC_ATMEL;
11791 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11792 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11794 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11795 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11796 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11797 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11798 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11800 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11801 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11802 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11804 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11805 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11806 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11810 case FLASH_5752VENDOR_ST_M45PE10:
11811 case FLASH_5752VENDOR_ST_M45PE20:
11812 case FLASH_5752VENDOR_ST_M45PE40:
11813 tp->nvram_jedecnum = JEDEC_ST;
11814 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11815 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11817 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11818 case FLASH_5752VENDOR_ST_M45PE10:
11819 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11821 case FLASH_5752VENDOR_ST_M45PE20:
11822 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11824 case FLASH_5752VENDOR_ST_M45PE40:
11825 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11830 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11834 tg3_nvram_get_pagesize(tp, nvcfg1);
11835 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11836 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11840 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11844 nvcfg1 = tr32(NVRAM_CFG1);
11846 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11847 case FLASH_5717VENDOR_ATMEL_EEPROM:
11848 case FLASH_5717VENDOR_MICRO_EEPROM:
11849 tp->nvram_jedecnum = JEDEC_ATMEL;
11850 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11851 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11853 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11854 tw32(NVRAM_CFG1, nvcfg1);
11856 case FLASH_5717VENDOR_ATMEL_MDB011D:
11857 case FLASH_5717VENDOR_ATMEL_ADB011B:
11858 case FLASH_5717VENDOR_ATMEL_ADB011D:
11859 case FLASH_5717VENDOR_ATMEL_MDB021D:
11860 case FLASH_5717VENDOR_ATMEL_ADB021B:
11861 case FLASH_5717VENDOR_ATMEL_ADB021D:
11862 case FLASH_5717VENDOR_ATMEL_45USPT:
11863 tp->nvram_jedecnum = JEDEC_ATMEL;
11864 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11865 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11867 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11868 case FLASH_5717VENDOR_ATMEL_MDB021D:
11869 case FLASH_5717VENDOR_ATMEL_ADB021B:
11870 case FLASH_5717VENDOR_ATMEL_ADB021D:
11871 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11874 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11878 case FLASH_5717VENDOR_ST_M_M25PE10:
11879 case FLASH_5717VENDOR_ST_A_M25PE10:
11880 case FLASH_5717VENDOR_ST_M_M45PE10:
11881 case FLASH_5717VENDOR_ST_A_M45PE10:
11882 case FLASH_5717VENDOR_ST_M_M25PE20:
11883 case FLASH_5717VENDOR_ST_A_M25PE20:
11884 case FLASH_5717VENDOR_ST_M_M45PE20:
11885 case FLASH_5717VENDOR_ST_A_M45PE20:
11886 case FLASH_5717VENDOR_ST_25USPT:
11887 case FLASH_5717VENDOR_ST_45USPT:
11888 tp->nvram_jedecnum = JEDEC_ST;
11889 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11890 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11892 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11893 case FLASH_5717VENDOR_ST_M_M25PE20:
11894 case FLASH_5717VENDOR_ST_A_M25PE20:
11895 case FLASH_5717VENDOR_ST_M_M45PE20:
11896 case FLASH_5717VENDOR_ST_A_M45PE20:
11897 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11900 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11905 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11909 tg3_nvram_get_pagesize(tp, nvcfg1);
11910 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11911 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11914 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
11916 u32 nvcfg1, nvmpinstrp;
11918 nvcfg1 = tr32(NVRAM_CFG1);
11919 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
11921 switch (nvmpinstrp) {
11922 case FLASH_5720_EEPROM_HD:
11923 case FLASH_5720_EEPROM_LD:
11924 tp->nvram_jedecnum = JEDEC_ATMEL;
11925 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11927 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11928 tw32(NVRAM_CFG1, nvcfg1);
11929 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
11930 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11932 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
11934 case FLASH_5720VENDOR_M_ATMEL_DB011D:
11935 case FLASH_5720VENDOR_A_ATMEL_DB011B:
11936 case FLASH_5720VENDOR_A_ATMEL_DB011D:
11937 case FLASH_5720VENDOR_M_ATMEL_DB021D:
11938 case FLASH_5720VENDOR_A_ATMEL_DB021B:
11939 case FLASH_5720VENDOR_A_ATMEL_DB021D:
11940 case FLASH_5720VENDOR_M_ATMEL_DB041D:
11941 case FLASH_5720VENDOR_A_ATMEL_DB041B:
11942 case FLASH_5720VENDOR_A_ATMEL_DB041D:
11943 case FLASH_5720VENDOR_M_ATMEL_DB081D:
11944 case FLASH_5720VENDOR_A_ATMEL_DB081D:
11945 case FLASH_5720VENDOR_ATMEL_45USPT:
11946 tp->nvram_jedecnum = JEDEC_ATMEL;
11947 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11948 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11950 switch (nvmpinstrp) {
11951 case FLASH_5720VENDOR_M_ATMEL_DB021D:
11952 case FLASH_5720VENDOR_A_ATMEL_DB021B:
11953 case FLASH_5720VENDOR_A_ATMEL_DB021D:
11954 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11956 case FLASH_5720VENDOR_M_ATMEL_DB041D:
11957 case FLASH_5720VENDOR_A_ATMEL_DB041B:
11958 case FLASH_5720VENDOR_A_ATMEL_DB041D:
11959 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11961 case FLASH_5720VENDOR_M_ATMEL_DB081D:
11962 case FLASH_5720VENDOR_A_ATMEL_DB081D:
11963 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11966 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11970 case FLASH_5720VENDOR_M_ST_M25PE10:
11971 case FLASH_5720VENDOR_M_ST_M45PE10:
11972 case FLASH_5720VENDOR_A_ST_M25PE10:
11973 case FLASH_5720VENDOR_A_ST_M45PE10:
11974 case FLASH_5720VENDOR_M_ST_M25PE20:
11975 case FLASH_5720VENDOR_M_ST_M45PE20:
11976 case FLASH_5720VENDOR_A_ST_M25PE20:
11977 case FLASH_5720VENDOR_A_ST_M45PE20:
11978 case FLASH_5720VENDOR_M_ST_M25PE40:
11979 case FLASH_5720VENDOR_M_ST_M45PE40:
11980 case FLASH_5720VENDOR_A_ST_M25PE40:
11981 case FLASH_5720VENDOR_A_ST_M45PE40:
11982 case FLASH_5720VENDOR_M_ST_M25PE80:
11983 case FLASH_5720VENDOR_M_ST_M45PE80:
11984 case FLASH_5720VENDOR_A_ST_M25PE80:
11985 case FLASH_5720VENDOR_A_ST_M45PE80:
11986 case FLASH_5720VENDOR_ST_25USPT:
11987 case FLASH_5720VENDOR_ST_45USPT:
11988 tp->nvram_jedecnum = JEDEC_ST;
11989 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11990 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11992 switch (nvmpinstrp) {
11993 case FLASH_5720VENDOR_M_ST_M25PE20:
11994 case FLASH_5720VENDOR_M_ST_M45PE20:
11995 case FLASH_5720VENDOR_A_ST_M25PE20:
11996 case FLASH_5720VENDOR_A_ST_M45PE20:
11997 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11999 case FLASH_5720VENDOR_M_ST_M25PE40:
12000 case FLASH_5720VENDOR_M_ST_M45PE40:
12001 case FLASH_5720VENDOR_A_ST_M25PE40:
12002 case FLASH_5720VENDOR_A_ST_M45PE40:
12003 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12005 case FLASH_5720VENDOR_M_ST_M25PE80:
12006 case FLASH_5720VENDOR_M_ST_M45PE80:
12007 case FLASH_5720VENDOR_A_ST_M25PE80:
12008 case FLASH_5720VENDOR_A_ST_M45PE80:
12009 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12012 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12017 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12021 tg3_nvram_get_pagesize(tp, nvcfg1);
12022 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12023 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
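/* tg3_nvram_init() programs the EEPROM clock, enables SEEPROM access
 * through GRC_LOCAL_CTRL and then, for NVRAM-capable chips, takes the
 * NVRAM lock, dispatches to the per-ASIC info routine above, and sizes
 * the part if the strap did not already report a size.
 */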
12026 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12027 static void __devinit tg3_nvram_init(struct tg3 *tp)
12029 tw32_f(GRC_EEPROM_ADDR,
12030 (EEPROM_ADDR_FSM_RESET |
12031 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12032 EEPROM_ADDR_CLKPERD_SHIFT)));
12036 /* Enable seeprom accesses. */
12037 tw32_f(GRC_LOCAL_CTRL,
12038 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12041 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12042 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12043 tp->tg3_flags |= TG3_FLAG_NVRAM;
12045 if (tg3_nvram_lock(tp)) {
12046 netdev_warn(tp->dev,
12047 "Cannot get nvram lock, %s failed\n",
12051 tg3_enable_nvram_access(tp);
12053 tp->nvram_size = 0;
12055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12056 tg3_get_5752_nvram_info(tp);
12057 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12058 tg3_get_5755_nvram_info(tp);
12059 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12062 tg3_get_5787_nvram_info(tp);
12063 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12064 tg3_get_5761_nvram_info(tp);
12065 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12066 tg3_get_5906_nvram_info(tp);
12067 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12069 tg3_get_57780_nvram_info(tp);
12070 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12072 tg3_get_5717_nvram_info(tp);
12073 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12074 tg3_get_5720_nvram_info(tp);
12076 tg3_get_nvram_info(tp);
12078 if (tp->nvram_size == 0)
12079 tg3_get_nvram_size(tp);
12081 tg3_disable_nvram_access(tp);
12082 tg3_nvram_unlock(tp);
12085 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
12087 tg3_get_eeprom_size(tp);
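/* The EEPROM write helper below writes one 32-bit word at a time: the
 * data goes into GRC_EEPROM_DATA (byte-swapped to undo the read-side
 * conversion, as explained in the comment inside), the address and the
 * START/WRITE bits go into GRC_EEPROM_ADDR, and the loop then polls
 * for EEPROM_ADDR_COMPLETE before moving on to the next word.
 */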
12091 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12092 u32 offset, u32 len, u8 *buf)
12097 for (i = 0; i < len; i += 4) {
12103 memcpy(&data, buf + i, 4);
12106 * The SEEPROM interface expects the data to always be opposite
12107 * the native endian format. We accomplish this by reversing
12108 * all the operations that would have been performed on the
12109 * data from a call to tg3_nvram_read_be32().
12111 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12113 val = tr32(GRC_EEPROM_ADDR);
12114 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12116 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12118 tw32(GRC_EEPROM_ADDR, val |
12119 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12120 (addr & EEPROM_ADDR_ADDR_MASK) |
12121 EEPROM_ADDR_START |
12122 EEPROM_ADDR_WRITE);
12124 for (j = 0; j < 1000; j++) {
12125 val = tr32(GRC_EEPROM_ADDR);
12127 if (val & EEPROM_ADDR_COMPLETE)
12131 if (!(val & EEPROM_ADDR_COMPLETE)) {
12140 /* offset and length are dword aligned */
12141 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12145 u32 pagesize = tp->nvram_pagesize;
12146 u32 pagemask = pagesize - 1;
12150 tmp = kmalloc(pagesize, GFP_KERNEL);
12156 u32 phy_addr, page_off, size;
12158 phy_addr = offset & ~pagemask;
12160 for (j = 0; j < pagesize; j += 4) {
12161 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12162 (__be32 *) (tmp + j));
12169 page_off = offset & pagemask;
12176 memcpy(tmp + page_off, buf, size);
12178 offset = offset + (pagesize - page_off);
12180 tg3_enable_nvram_access(tp);
12183 * Before we can erase the flash page, we need
12184 * to issue a special "write enable" command.
12186 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12188 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12191 /* Erase the target page */
12192 tw32(NVRAM_ADDR, phy_addr);
12194 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12195 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12197 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12200 /* Issue another write enable to start the write. */
12201 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12203 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12206 for (j = 0; j < pagesize; j += 4) {
12209 data = *((__be32 *) (tmp + j));
12211 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12213 tw32(NVRAM_ADDR, phy_addr + j);
12215 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12219 nvram_cmd |= NVRAM_CMD_FIRST;
12220 else if (j == (pagesize - 4))
12221 nvram_cmd |= NVRAM_CMD_LAST;
12223 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12230 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12231 tg3_nvram_exec_cmd(tp, nvram_cmd);
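/* The unbuffered write path above is a read-modify-write of whole
 * flash pages: phy_addr = offset & ~pagemask selects the page base and
 * page_off = offset & pagemask the offset within it.  The page is read
 * into a bounce buffer, the caller's data is merged in, then a write
 * enable, a page erase, another write enable and finally a word-by-word
 * program with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing the burst.
 */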
12238 /* offset and length are dword aligned */
12239 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12244 for (i = 0; i < len; i += 4, offset += 4) {
12245 u32 page_off, phy_addr, nvram_cmd;
12248 memcpy(&data, buf + i, 4);
12249 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12251 page_off = offset % tp->nvram_pagesize;
12253 phy_addr = tg3_nvram_phys_addr(tp, offset);
12255 tw32(NVRAM_ADDR, phy_addr);
12257 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12259 if (page_off == 0 || i == 0)
12260 nvram_cmd |= NVRAM_CMD_FIRST;
12261 if (page_off == (tp->nvram_pagesize - 4))
12262 nvram_cmd |= NVRAM_CMD_LAST;
12264 if (i == (len - 4))
12265 nvram_cmd |= NVRAM_CMD_LAST;
12267 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12268 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12269 (tp->nvram_jedecnum == JEDEC_ST) &&
12270 (nvram_cmd & NVRAM_CMD_FIRST)) {
12272 if ((ret = tg3_nvram_exec_cmd(tp,
12273 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12278 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12279 /* We always do complete word writes to eeprom. */
12280 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12283 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12289 /* offset and length are dword aligned */
12290 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12294 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12295 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12296 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12300 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12301 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12305 ret = tg3_nvram_lock(tp);
12309 tg3_enable_nvram_access(tp);
12310 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12311 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12312 tw32(NVRAM_WRITE1, 0x406);
12314 grc_mode = tr32(GRC_MODE);
12315 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12317 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12318 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12320 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12323 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12327 grc_mode = tr32(GRC_MODE);
12328 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12330 tg3_disable_nvram_access(tp);
12331 tg3_nvram_unlock(tp);
12334 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12335 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
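/* The table below maps PCI subsystem vendor/device IDs of known
 * Broadcom, 3Com, Dell, Compaq and IBM boards to their PHY type.  It
 * is only consulted when the EEPROM carries no usable PHY ID; a zero
 * entry appears to mark fiber/SerDes boards (e.g. the 3C996SX) that
 * have no copper PHY.
 */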
12342 struct subsys_tbl_ent {
12343 u16 subsys_vendor, subsys_devid;
12347 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12348 /* Broadcom boards. */
12349 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12350 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12351 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12352 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12353 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12354 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12355 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12356 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12357 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12358 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12359 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12360 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12361 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12362 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12363 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12364 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12365 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12366 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12367 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12368 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12369 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12370 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12373 { TG3PCI_SUBVENDOR_ID_3COM,
12374 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12375 { TG3PCI_SUBVENDOR_ID_3COM,
12376 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12377 { TG3PCI_SUBVENDOR_ID_3COM,
12378 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12379 { TG3PCI_SUBVENDOR_ID_3COM,
12380 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12381 { TG3PCI_SUBVENDOR_ID_3COM,
12382 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12385 { TG3PCI_SUBVENDOR_ID_DELL,
12386 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12387 { TG3PCI_SUBVENDOR_ID_DELL,
12388 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12389 { TG3PCI_SUBVENDOR_ID_DELL,
12390 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12391 { TG3PCI_SUBVENDOR_ID_DELL,
12392 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12394 /* Compaq boards. */
12395 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12396 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12397 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12398 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12399 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12400 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12401 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12402 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12403 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12404 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12407 { TG3PCI_SUBVENDOR_ID_IBM,
12408 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12411 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12415 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12416 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12417 tp->pdev->subsystem_vendor) &&
12418 (subsys_id_to_phy_id[i].subsys_devid ==
12419 tp->pdev->subsystem_device))
12420 return &subsys_id_to_phy_id[i];
12425 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12430 /* On some early chips the SRAM cannot be accessed in D3hot state,
12431 * so we need to make sure we're in D0.
12433 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12434 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12435 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12438 /* Make sure register accesses (indirect or otherwise)
12439 * will function correctly.
12441 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12442 tp->misc_host_ctrl);
12444 /* The memory arbiter has to be enabled in order for SRAM accesses
12445 * to succeed. Normally on powerup the tg3 chip firmware will make
12446 * sure it is enabled, but other entities such as system netboot
12447 * code might disable it.
12449 val = tr32(MEMARB_MODE);
12450 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12452 tp->phy_id = TG3_PHY_ID_INVALID;
12453 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12455 /* Assume an onboard device and WOL capable by default. */
12456 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12459 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12460 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12461 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12463 val = tr32(VCPU_CFGSHDW);
12464 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12465 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12466 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12467 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12468 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12472 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12473 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12474 u32 nic_cfg, led_cfg;
12475 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12476 int eeprom_phy_serdes = 0;
12478 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12479 tp->nic_sram_data_cfg = nic_cfg;
12481 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12482 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12483 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12484 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12485 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12486 (ver > 0) && (ver < 0x100))
12487 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12490 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12492 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12493 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12494 eeprom_phy_serdes = 1;
12496 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12497 if (nic_phy_id != 0) {
12498 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12499 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12501 eeprom_phy_id = (id1 >> 16) << 10;
12502 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12503 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12507 tp->phy_id = eeprom_phy_id;
12508 if (eeprom_phy_serdes) {
12509 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12510 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12512 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12515 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12516 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12517 SHASTA_EXT_LED_MODE_MASK);
12519 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12523 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12524 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12527 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12528 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12531 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12532 tp->led_ctrl = LED_CTRL_MODE_MAC;
12534 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12535 * read, as happens with some older 5700/5701 bootcode.
12537 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12539 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12541 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12545 case SHASTA_EXT_LED_SHARED:
12546 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12547 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12548 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12549 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12550 LED_CTRL_MODE_PHY_2);
12553 case SHASTA_EXT_LED_MAC:
12554 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12557 case SHASTA_EXT_LED_COMBO:
12558 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12559 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12560 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12561 LED_CTRL_MODE_PHY_2);
12566 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12568 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12569 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12571 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12572 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12574 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12575 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12576 if ((tp->pdev->subsystem_vendor ==
12577 PCI_VENDOR_ID_ARIMA) &&
12578 (tp->pdev->subsystem_device == 0x205a ||
12579 tp->pdev->subsystem_device == 0x2063))
12580 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12582 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12583 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12586 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12587 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12588 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12589 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12592 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12593 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12594 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12596 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12597 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12598 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12600 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12601 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12602 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12604 if (cfg2 & (1 << 17))
12605 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12607 /* serdes signal pre-emphasis in register 0x590 is set by
12608 * the bootcode if bit 18 is set */
12609 if (cfg2 & (1 << 18))
12610 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12612 if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
12613 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12614 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12615 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12616 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12618 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12620 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
12623 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12624 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12625 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12628 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12629 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12630 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12631 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12632 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12633 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12636 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12637 device_set_wakeup_enable(&tp->pdev->dev,
12638 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12640 device_set_wakeup_capable(&tp->pdev->dev, false);
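/* tg3_issue_otp_command() below kicks off one OTP controller command
 * (START followed by the command itself) and then polls OTP_STATUS for
 * CMD_DONE for up to roughly 1 ms, returning -EBUSY on timeout.
 */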
12643 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12648 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12649 tw32(OTP_CTRL, cmd);
12651 /* Wait for up to 1 ms for command to execute. */
12652 for (i = 0; i < 100; i++) {
12653 val = tr32(OTP_STATUS);
12654 if (val & OTP_STATUS_CMD_DONE)
12659 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12662 /* Read the gphy configuration from the OTP region of the chip. The gphy
12663 * configuration is a 32-bit value that straddles the alignment boundary.
12664 * We do two 32-bit reads and then shift and merge the results.
12666 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12668 u32 bhalf_otp, thalf_otp;
12670 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12672 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12675 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12677 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12680 thalf_otp = tr32(OTP_READ_DATA);
12682 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12684 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12687 bhalf_otp = tr32(OTP_READ_DATA);
12689 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
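/* Worked example of the OTP merge above: if the read at MAGIC1 returns
 * thalf_otp = 0xAAAABBBB and the read at MAGIC2 returns
 * bhalf_otp = 0xCCCCDDDD, the gphy config is
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC, i.e. the low half of the first
 * word glued to the high half of the second.
 */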
12692 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12694 u32 adv = ADVERTISED_Autoneg |
12697 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12698 adv |= ADVERTISED_1000baseT_Half |
12699 ADVERTISED_1000baseT_Full;
12701 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12702 adv |= ADVERTISED_100baseT_Half |
12703 ADVERTISED_100baseT_Full |
12704 ADVERTISED_10baseT_Half |
12705 ADVERTISED_10baseT_Full |
12708 adv |= ADVERTISED_FIBRE;
12710 tp->link_config.advertising = adv;
12711 tp->link_config.speed = SPEED_INVALID;
12712 tp->link_config.duplex = DUPLEX_INVALID;
12713 tp->link_config.autoneg = AUTONEG_ENABLE;
12714 tp->link_config.active_speed = SPEED_INVALID;
12715 tp->link_config.active_duplex = DUPLEX_INVALID;
12716 tp->link_config.orig_speed = SPEED_INVALID;
12717 tp->link_config.orig_duplex = DUPLEX_INVALID;
12718 tp->link_config.orig_autoneg = AUTONEG_INVALID;
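/* tg3_phy_probe() below assembles the driver's internal PHY ID from
 * MII_PHYSID1/2 as
 *
 *	phy_id = (id1 & 0xffff) << 10 | (id2 & 0xfc00) << 16 | (id2 & 0x03ff)
 *
 * which matches the packing used for the EEPROM-provided ID in
 * tg3_get_eeprom_hw_cfg().  Note this is a driver-private layout, not
 * the raw concatenation of the two MII registers.
 */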
12721 static int __devinit tg3_phy_probe(struct tg3 *tp)
12723 u32 hw_phy_id_1, hw_phy_id_2;
12724 u32 hw_phy_id, hw_phy_id_masked;
12727 /* flow control autonegotiation is default behavior */
12728 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12729 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12731 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12732 return tg3_phy_init(tp);
12734 /* Reading the PHY ID register can conflict with ASF
12735 * firmware access to the PHY hardware.
12738 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12739 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12740 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12742 /* Now read the physical PHY_ID from the chip and verify
12743 * that it is sane. If it doesn't look good, we fall back
12744 * to either the hard-coded table-based PHY ID or, failing
12745 * that, the value found in the eeprom area.
12747 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12748 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12750 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12751 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12752 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12754 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12757 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12758 tp->phy_id = hw_phy_id;
12759 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12760 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12762 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12764 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12765 /* Do nothing, phy ID already set up in
12766 * tg3_get_eeprom_hw_cfg().
12769 struct subsys_tbl_ent *p;
12771 /* No eeprom signature? Try the hardcoded
12772 * subsys device table.
12774 p = tg3_lookup_by_subsys(tp);
12778 tp->phy_id = p->phy_id;
12780 if (!tp->phy_id || tp->phy_id == TG3_PHY_ID_BCM8002)
12781 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12785 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12786 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12787 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12788 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12789 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12790 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12792 tg3_phy_init_link_config(tp);
12794 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12795 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12796 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12797 u32 bmsr, adv_reg, tg3_ctrl, mask;
12799 tg3_readphy(tp, MII_BMSR, &bmsr);
12800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12801 (bmsr & BMSR_LSTATUS))
12802 goto skip_phy_reset;
12804 err = tg3_phy_reset(tp);
12808 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12809 ADVERTISE_100HALF | ADVERTISE_100FULL |
12810 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12812 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12813 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12814 MII_TG3_CTRL_ADV_1000_FULL);
12815 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12816 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12817 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12818 MII_TG3_CTRL_ENABLE_AS_MASTER);
12821 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12822 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12823 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12824 if (!tg3_copper_is_advertising_all(tp, mask)) {
12825 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12827 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12828 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12830 tg3_writephy(tp, MII_BMCR,
12831 BMCR_ANENABLE | BMCR_ANRESTART);
12833 tg3_phy_set_wirespeed(tp);
12835 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12836 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12837 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12841 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12842 err = tg3_init_5401phy_dsp(tp);
12846 err = tg3_init_5401phy_dsp(tp);
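/* tg3_read_vpd() below fetches the VPD block from NVRAM when the
 * standard EEPROM signature is present (using the big-endian read
 * routine so the buffer keeps the byte order stored in NVRAM) and
 * through pci_read_vpd() otherwise.  The buffer is then walked with
 * pci_vpd_find_tag() and pci_vpd_find_info_keyword() to pull out a
 * Dell ("1028") firmware version string and the board part number.
 */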
12852 static void __devinit tg3_read_vpd(struct tg3 *tp)
12855 unsigned int block_end, rosize, len;
12859 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12860 tg3_nvram_read(tp, 0x0, &magic))
12863 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12867 if (magic == TG3_EEPROM_MAGIC) {
12868 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12871 /* The data is in little-endian format in NVRAM.
12872 * Use the big-endian read routines to preserve
12873 * the byte order as it exists in NVRAM.
12875 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12876 goto out_not_found;
12878 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12882 unsigned int pos = 0;
12884 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12885 cnt = pci_read_vpd(tp->pdev, pos,
12886 TG3_NVM_VPD_LEN - pos,
12888 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12891 goto out_not_found;
12893 if (pos != TG3_NVM_VPD_LEN)
12894 goto out_not_found;
12897 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12898 PCI_VPD_LRDT_RO_DATA);
12900 goto out_not_found;
12902 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12903 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12904 i += PCI_VPD_LRDT_TAG_SIZE;
12906 if (block_end > TG3_NVM_VPD_LEN)
12907 goto out_not_found;
12909 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12910 PCI_VPD_RO_KEYWORD_MFR_ID);
12912 len = pci_vpd_info_field_size(&vpd_data[j]);
12914 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12915 if (j + len > block_end || len != 4 ||
12916 memcmp(&vpd_data[j], "1028", 4))
12919 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12920 PCI_VPD_RO_KEYWORD_VENDOR0);
12924 len = pci_vpd_info_field_size(&vpd_data[j]);
12926 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12927 if (j + len > block_end)
12930 memcpy(tp->fw_ver, &vpd_data[j], len);
12931 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12935 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12936 PCI_VPD_RO_KEYWORD_PARTNO);
12938 goto out_not_found;
12940 len = pci_vpd_info_field_size(&vpd_data[i]);
12942 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12943 if (len > TG3_BPN_SIZE ||
12944 (len + i) > TG3_NVM_VPD_LEN)
12945 goto out_not_found;
12947 memcpy(tp->board_part_number, &vpd_data[i], len);
12951 if (tp->board_part_number[0])
12955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12956 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
12957 strcpy(tp->board_part_number, "BCM5717");
12958 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
12959 strcpy(tp->board_part_number, "BCM5718");
12962 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
12963 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12964 strcpy(tp->board_part_number, "BCM57780");
12965 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12966 strcpy(tp->board_part_number, "BCM57760");
12967 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12968 strcpy(tp->board_part_number, "BCM57790");
12969 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12970 strcpy(tp->board_part_number, "BCM57788");
12973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12974 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12975 strcpy(tp->board_part_number, "BCM57761");
12976 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12977 strcpy(tp->board_part_number, "BCM57765");
12978 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12979 strcpy(tp->board_part_number, "BCM57781");
12980 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12981 strcpy(tp->board_part_number, "BCM57785");
12982 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12983 strcpy(tp->board_part_number, "BCM57791");
12984 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12985 strcpy(tp->board_part_number, "BCM57795");
12988 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12989 strcpy(tp->board_part_number, "BCM95906");
12992 strcpy(tp->board_part_number, "none");
12996 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13000 if (tg3_nvram_read(tp, offset, &val) ||
13001 (val & 0xfc000000) != 0x0c000000 ||
13002 tg3_nvram_read(tp, offset + 4, &val) ||
13009 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13011 u32 val, offset, start, ver_offset;
13013 bool newver = false;
13015 if (tg3_nvram_read(tp, 0xc, &offset) ||
13016 tg3_nvram_read(tp, 0x4, &start))
13019 offset = tg3_nvram_logical_addr(tp, offset);
13021 if (tg3_nvram_read(tp, offset, &val))
13024 if ((val & 0xfc000000) == 0x0c000000) {
13025 if (tg3_nvram_read(tp, offset + 4, &val))
13032 dst_off = strlen(tp->fw_ver);
13035 if (TG3_VER_SIZE - dst_off < 16 ||
13036 tg3_nvram_read(tp, offset + 8, &ver_offset))
13039 offset = offset + ver_offset - start;
13040 for (i = 0; i < 16; i += 4) {
13042 if (tg3_nvram_read_be32(tp, offset + i, &v))
13045 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13050 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13053 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13054 TG3_NVM_BCVER_MAJSFT;
13055 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13056 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13057 "v%d.%02d", major, minor);
13061 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13063 u32 val, major, minor;
13065 /* Use native endian representation */
13066 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13069 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13070 TG3_NVM_HWSB_CFG1_MAJSFT;
13071 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13072 TG3_NVM_HWSB_CFG1_MINSFT;
13074 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13077 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13079 u32 offset, major, minor, build;
13081 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13083 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13086 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13087 case TG3_EEPROM_SB_REVISION_0:
13088 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13090 case TG3_EEPROM_SB_REVISION_2:
13091 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13093 case TG3_EEPROM_SB_REVISION_3:
13094 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13096 case TG3_EEPROM_SB_REVISION_4:
13097 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13099 case TG3_EEPROM_SB_REVISION_5:
13100 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13102 case TG3_EEPROM_SB_REVISION_6:
13103 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13109 if (tg3_nvram_read(tp, offset, &val))
13112 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13113 TG3_EEPROM_SB_EDH_BLD_SHFT;
13114 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13115 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13116 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13118 if (minor > 99 || build > 26)
13121 offset = strlen(tp->fw_ver);
13122 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13123 " v%d.%02d", major, minor);
13126 offset = strlen(tp->fw_ver);
13127 if (offset < TG3_VER_SIZE - 1)
13128 tp->fw_ver[offset] = 'a' + build - 1;
13132 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13134 u32 val, offset, start;
13137 for (offset = TG3_NVM_DIR_START;
13138 offset < TG3_NVM_DIR_END;
13139 offset += TG3_NVM_DIRENT_SIZE) {
13140 if (tg3_nvram_read(tp, offset, &val))
13143 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13147 if (offset == TG3_NVM_DIR_END)
13150 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
13151 start = 0x08000000;
13152 else if (tg3_nvram_read(tp, offset - 4, &start))
13155 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13156 !tg3_fw_img_is_valid(tp, offset) ||
13157 tg3_nvram_read(tp, offset + 8, &val))
13160 offset += val - start;
13162 vlen = strlen(tp->fw_ver);
13164 tp->fw_ver[vlen++] = ',';
13165 tp->fw_ver[vlen++] = ' ';
13167 for (i = 0; i < 4; i++) {
13169 if (tg3_nvram_read_be32(tp, offset, &v))
13172 offset += sizeof(v);
13174 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13175 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13179 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13184 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13190 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
13191 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
13194 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13195 if (apedata != APE_SEG_SIG_MAGIC)
13198 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13199 if (!(apedata & APE_FW_STATUS_READY))
13202 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13204 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13205 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
13211 vlen = strlen(tp->fw_ver);
13213 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13215 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13216 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13217 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13218 (apedata & APE_FW_VERSION_BLDMSK));
13221 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13224 bool vpd_vers = false;
13226 if (tp->fw_ver[0] != 0)
13229 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
13230 strcat(tp->fw_ver, "sb");
13234 if (tg3_nvram_read(tp, 0, &val))
13237 if (val == TG3_EEPROM_MAGIC)
13238 tg3_read_bc_ver(tp);
13239 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13240 tg3_read_sb_ver(tp, val);
13241 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13242 tg3_read_hwsb_ver(tp);
13246 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13247 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13250 tg3_read_mgmtfw_ver(tp);
13253 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13256 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13258 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13260 dev->vlan_features |= flags;
13263 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13265 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
13266 return TG3_RX_RET_MAX_SIZE_5717;
13267 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13268 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13269 return TG3_RX_RET_MAX_SIZE_5700;
13271 return TG3_RX_RET_MAX_SIZE_5705;
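/* Host bridges listed in the table below are known to reorder posted
 * PCI writes; the probe code checks for their presence so it can flag
 * that stricter write ordering is needed on such systems.
 */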
13274 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13275 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13276 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13277 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13281 static int __devinit tg3_get_invariants(struct tg3 *tp)
13284 u32 pci_state_reg, grc_misc_cfg;
13289 /* Force memory write invalidate off. If we leave it on,
13290 * then on 5700_BX chips we have to enable a workaround.
13291 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13292 * to match the cacheline size. The Broadcom driver has this
13293 * workaround but turns MWI off all the time, so it never uses
13294 * it. This seems to suggest that the workaround is insufficient.
13296 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13297 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13298 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13300 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13301 * has the register indirect write enable bit set before
13302 * we try to access any of the MMIO registers. It is also
13303 * critical that the PCI-X hw workaround situation is decided
13304 * before that as well.
13306 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13309 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13310 MISC_HOST_CTRL_CHIPREV_SHIFT);
13311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13312 u32 prod_id_asic_rev;
13314 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13315 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13316 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13317 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13318 pci_read_config_dword(tp->pdev,
13319 TG3PCI_GEN2_PRODID_ASICREV,
13320 &prod_id_asic_rev);
13321 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13322 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13323 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13324 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13325 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13326 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13327 pci_read_config_dword(tp->pdev,
13328 TG3PCI_GEN15_PRODID_ASICREV,
13329 &prod_id_asic_rev);
13331 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13332 &prod_id_asic_rev);
13334 tp->pci_chip_rev_id = prod_id_asic_rev;
13337 /* Wrong chip ID in 5752 A0. This code can be removed later
13338 * as A0 is not in production.
13340 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13341 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13343 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13344 * we need to disable memory and use config. cycles
13345 * only to access all registers. The 5702/03 chips
13346 * can mistakenly decode the special cycles from the
13347 * ICH chipsets as memory write cycles, causing corruption
13348 * of register and memory space. Only certain ICH bridges
13349 * will drive special cycles with non-zero data during the
13350 * address phase which can fall within the 5703's address
13351 * range. This is not an ICH bug as the PCI spec allows
13352 * non-zero address during special cycles. However, only
13353 * these ICH bridges are known to drive non-zero addresses
13354 * during special cycles.
13356 * Since special cycles do not cross PCI bridges, we only
13357 * enable this workaround if the 5703 is on the secondary
13358 * bus of these ICH bridges.
13360 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13361 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13362 static struct tg3_dev_id {
13366 } ich_chipsets[] = {
13367 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13369 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13371 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13373 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13377 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13378 struct pci_dev *bridge = NULL;
13380 while (pci_id->vendor != 0) {
13381 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13387 if (pci_id->rev != PCI_ANY_ID) {
13388 if (bridge->revision > pci_id->rev)
13391 if (bridge->subordinate &&
13392 (bridge->subordinate->number ==
13393 tp->pdev->bus->number)) {
13395 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13396 pci_dev_put(bridge);
13402 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13403 static struct tg3_dev_id {
13406 } bridge_chipsets[] = {
13407 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13408 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13411 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13412 struct pci_dev *bridge = NULL;
13414 while (pci_id->vendor != 0) {
13415 bridge = pci_get_device(pci_id->vendor,
13422 if (bridge->subordinate &&
13423 (bridge->subordinate->number <=
13424 tp->pdev->bus->number) &&
13425 (bridge->subordinate->subordinate >=
13426 tp->pdev->bus->number)) {
13427 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13428 pci_dev_put(bridge);
13434 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13435 * DMA addresses > 40-bit. This bridge may have additional
13436 * 57xx devices behind it in some 4-port NIC designs, for example.
13437 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
13440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13442 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13443 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13444 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13446 struct pci_dev *bridge = NULL;
13449 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13450 PCI_DEVICE_ID_SERVERWORKS_EPB,
13452 if (bridge && bridge->subordinate &&
13453 (bridge->subordinate->number <=
13454 tp->pdev->bus->number) &&
13455 (bridge->subordinate->subordinate >=
13456 tp->pdev->bus->number)) {
13457 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13458 pci_dev_put(bridge);
13464 /* Initialize misc host control in PCI block. */
13465 tp->misc_host_ctrl |= (misc_ctrl_reg &
13466 MISC_HOST_CTRL_CHIPREV);
13467 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13468 tp->misc_host_ctrl);
13470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13474 tp->pdev_peer = tg3_find_peer(tp);
13476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13479 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13482 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13483 tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;
13485 /* Intentionally exclude ASIC_REV_5906 */
13486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13492 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13493 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13498 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13499 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13500 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13503 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13504 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13506 /* 5700 B0 chips do not support checksumming correctly due
13507 * to hardware bugs.
13509 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13510 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13512 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13514 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13515 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13516 features |= NETIF_F_IPV6_CSUM;
13517 tp->dev->features |= features;
13518 vlan_features_add(tp->dev, features);
13521 /* Determine TSO capabilities */
13522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13523 ; /* Do nothing. HW bug. */
13524 else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
13525 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13526 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13528 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13529 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13530 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13532 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13533 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13534 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13535 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13536 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13537 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13539 tp->fw_needed = FIRMWARE_TG3TSO5;
13541 tp->fw_needed = FIRMWARE_TG3TSO;
13546 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13547 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13549 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13550 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13551 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13552 tp->pdev_peer == tp->pdev))
13553 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13555 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13557 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13560 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
13561 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13562 tp->irq_max = TG3_IRQ_MAX_VECS;
13566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13569 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13570 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13571 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13572 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13575 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13576 tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;
13578 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
13579 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13580 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13582 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13583 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13584 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13585 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13587 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13590 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13591 if (tp->pcie_cap != 0) {
13594 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13596 tp->pcie_readrq = 4096;
13597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13599 tp->pcie_readrq = 2048;
13601 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
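/* Illustrative note: pcie_set_readrq() programs the Max_Read_Request_Size
 * field of the PCIe Device Control register and accepts power-of-two values
 * between 128 and 4096 bytes, so the logic above simply caps the 5719/5720
 * parts at a 2048-byte read request size while other PCIe chips keep the
 * 4096-byte value chosen by this driver.
 */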
13603 pci_read_config_word(tp->pdev,
13604 tp->pcie_cap + PCI_EXP_LNKCTL,
13606 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13608 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13611 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13612 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13613 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13614 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13615 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13617 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13618 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13619 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13620 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13621 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13622 if (!tp->pcix_cap) {
13623 dev_err(&tp->pdev->dev,
13624 "Cannot find PCI-X capability, aborting\n");
13628 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13629 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13632 /* If we have an AMD 762 or VIA K8T800 chipset, write
13633 * reordering to the mailbox registers done by the host
13634 * controller can cause major trouble. We read back from
13635 * every mailbox register write to force the writes to be
13636 * posted to the chip in order.
13638 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13639 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13640 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13642 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13643 &tp->pci_cacheline_sz);
13644 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13645 &tp->pci_lat_timer);
13646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13647 tp->pci_lat_timer < 64) {
13648 tp->pci_lat_timer = 64;
13649 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13650 tp->pci_lat_timer);
13653 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13654 /* 5700 BX chips need to have their TX producer index
13655 * mailboxes written twice to work around a bug.
13657 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13659 /* If we are in PCI-X mode, enable the register write workaround.
13661 * The workaround is to use indirect register accesses
13662 * for all chip writes except those to mailbox registers.
13664 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13667 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13669 /* The chip can have its power management PCI config
13670 * space registers clobbered due to this bug.
13671 * So explicitly force the chip into D0 here.
13673 pci_read_config_dword(tp->pdev,
13674 tp->pm_cap + PCI_PM_CTRL,
13676 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13677 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13678 pci_write_config_dword(tp->pdev,
13679 tp->pm_cap + PCI_PM_CTRL,
13682 /* Also, force SERR#/PERR# in PCI command. */
13683 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13684 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13685 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13689 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13690 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13691 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13692 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13694 /* Chip-specific fixup from Broadcom driver */
13695 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13696 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13697 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13698 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13701 /* Default fast path register access methods */
13702 tp->read32 = tg3_read32;
13703 tp->write32 = tg3_write32;
13704 tp->read32_mbox = tg3_read32;
13705 tp->write32_mbox = tg3_write32;
13706 tp->write32_tx_mbox = tg3_write32;
13707 tp->write32_rx_mbox = tg3_write32;
13709 /* Various workaround register access methods */
13710 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13711 tp->write32 = tg3_write_indirect_reg32;
13712 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13713 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13714 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13716 * Back-to-back register writes can cause problems on these
13717 * chips; the workaround is to read back all register writes
13718 * except those to mailbox registers.
13720 * See tg3_write_indirect_reg32().
13722 tp->write32 = tg3_write_flush_reg32;
13725 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13726 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13727 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13728 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13729 tp->write32_rx_mbox = tg3_write_flush_reg32;
13732 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13733 tp->read32 = tg3_read_indirect_reg32;
13734 tp->write32 = tg3_write_indirect_reg32;
13735 tp->read32_mbox = tg3_read_indirect_mbox;
13736 tp->write32_mbox = tg3_write_indirect_mbox;
13737 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13738 tp->write32_rx_mbox = tg3_write_indirect_mbox;
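/* Illustrative sketch (roughly what the indirect helpers do; the real
 * tg3_read_indirect_reg32() also takes indirect_lock): with the ICH
 * workaround active, register and mailbox accesses are routed through PCI
 * configuration space instead of MMIO, along the lines of:
 *
 *	u32 val;
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
 *
 * where TG3PCI_REG_BASE_ADDR and TG3PCI_REG_DATA are the config-space window
 * registers and "off" stands in for the register offset being read.
 */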
13743 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13744 pci_cmd &= ~PCI_COMMAND_MEMORY;
13745 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13748 tp->read32_mbox = tg3_read32_mbox_5906;
13749 tp->write32_mbox = tg3_write32_mbox_5906;
13750 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13751 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13754 if (tp->write32 == tg3_write_indirect_reg32 ||
13755 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13756 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13758 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13760 /* Get eeprom hw config before calling tg3_set_power_state().
13761 * In particular, the TG3_FLG2_IS_NIC flag must be
13762 * determined before calling tg3_set_power_state() so that
13763 * we know whether or not to switch out of Vaux power.
13764 * When the flag is set, it means that GPIO1 is used for eeprom
13765 * write protect and also implies that it is a LOM where GPIOs
13766 * are not used to switch power.
13768 tg3_get_eeprom_hw_cfg(tp);
13770 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13771 /* Allow reads and writes to the
13772 * APE register and memory space.
13774 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13775 PCISTATE_ALLOW_APE_SHMEM_WR |
13776 PCISTATE_ALLOW_APE_PSPACE_WR;
13777 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13785 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13786 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13788 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13789 * GPIO1 driven high will bring 5700's external PHY out of reset.
13790 * It is also used as eeprom write protect on LOMs.
13792 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13793 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13794 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13795 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13796 GRC_LCLCTRL_GPIO_OUTPUT1);
13797 /* Unused GPIO3 must be driven as output on 5752 because there
13798 * are no pull-up resistors on unused GPIO pins.
13800 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13801 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13806 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13808 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13809 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13810 /* Turn off the debug UART. */
13811 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13812 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13813 /* Keep VMain power. */
13814 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13815 GRC_LCLCTRL_GPIO_OUTPUT0;
13818 /* Force the chip into D0. */
13819 err = tg3_power_up(tp);
13821 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13825 /* Derive initial jumbo mode from MTU assigned in
13826 * ether_setup() via the alloc_etherdev() call
13828 if (tp->dev->mtu > ETH_DATA_LEN &&
13829 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13830 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13832 /* Determine WakeOnLan speed to use. */
13833 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13834 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13835 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13836 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13837 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13839 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13843 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13845 /* A few boards don't want Ethernet@WireSpeed phy feature */
13846 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13847 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13848 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13849 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13850 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13851 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13852 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13854 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13855 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13856 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13857 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13858 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13860 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13861 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13862 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13863 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13864 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
13865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13867 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13868 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13869 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13870 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13871 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13872 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13873 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13875 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13879 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13880 tp->phy_otp = tg3_read_otp_phycfg(tp);
13881 if (tp->phy_otp == 0)
13882 tp->phy_otp = TG3_OTP_DEFAULT;
13885 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13886 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13888 tp->mi_mode = MAC_MI_MODE_BASE;
13890 tp->coalesce_mode = 0;
13891 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13892 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13893 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13897 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13899 err = tg3_mdio_init(tp);
13903 /* Initialize data/descriptor byte/word swapping. */
13904 val = tr32(GRC_MODE);
13905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13906 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
13907 GRC_MODE_WORD_SWAP_B2HRX_DATA |
13908 GRC_MODE_B2HRX_ENABLE |
13909 GRC_MODE_HTX2B_ENABLE |
13910 GRC_MODE_HOST_STACKUP);
13912 val &= GRC_MODE_HOST_STACKUP;
13914 tw32(GRC_MODE, val | tp->grc_mode);
13916 tg3_switch_clocks(tp);
13918 /* Clear this out for sanity. */
13919 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13921 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13923 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13924 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13925 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13927 if (chiprevid == CHIPREV_ID_5701_A0 ||
13928 chiprevid == CHIPREV_ID_5701_B0 ||
13929 chiprevid == CHIPREV_ID_5701_B2 ||
13930 chiprevid == CHIPREV_ID_5701_B5) {
13931 void __iomem *sram_base;
13933 /* Write some dummy words into the SRAM status block
13934 * area and see if they read back correctly. If the read-back
13935 * value is bad, force-enable the PCIX workaround.
13937 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13939 writel(0x00000000, sram_base);
13940 writel(0x00000000, sram_base + 4);
13941 writel(0xffffffff, sram_base + 4);
13942 if (readl(sram_base) != 0x00000000)
13943 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13948 tg3_nvram_init(tp);
13950 grc_misc_cfg = tr32(GRC_MISC_CFG);
13951 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13954 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13955 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13956 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13958 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13959 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13960 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13961 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13962 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13963 HOSTCC_MODE_CLRTICK_TXBD);
13965 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13966 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13967 tp->misc_host_ctrl);
13970 /* Preserve the APE MAC_MODE bits */
13971 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13972 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13974 tp->mac_mode = TG3_DEF_MAC_MODE;
13976 /* these are limited to 10/100 only */
13977 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13978 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13979 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13980 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13981 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13982 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13983 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13984 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13985 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13986 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13987 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13988 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13989 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13990 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13991 (tp->phy_flags & TG3_PHYFLG_IS_FET))
13992 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13994 err = tg3_phy_probe(tp);
13996 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13997 /* ... but do not return immediately ... */
14002 tg3_read_fw_ver(tp);
14004 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14005 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14008 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14010 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14013 /* 5700 {AX,BX} chips have a broken status block link
14014 * change bit implementation, so we must use the
14015 * status register in those cases.
14017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14018 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14020 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
14022 /* The led_ctrl is set during tg3_phy_probe; here we might
14023 * have to force the link status polling mechanism based
14024 * upon subsystem IDs.
14026 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14028 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14029 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14030 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14033 /* For all SERDES we poll the MAC status register. */
14034 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14035 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
14037 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
14039 tp->rx_offset = NET_IP_ALIGN;
14040 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14042 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
14044 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14045 tp->rx_copy_thresh = ~(u16)0;
14049 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14050 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14051 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14053 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
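/* Worked example: the ring sizes are powers of two, so a mask of (size - 1)
 * lets the hot path wrap an index with a cheap AND instead of a modulo.
 * For a 512-entry ring the mask is 0x1ff, and (511 + 1) & 0x1ff evaluates
 * to 0, i.e. the index wraps back to the first entry.
 */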
14055 /* Increment the rx prod index on the rx std ring by at most
14056 * 8 for these chips to work around hw errata.
14058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14061 tp->rx_std_max_post = 8;
14063 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
14064 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14065 PCIE_PWR_MGMT_L1_THRESH_MSK;
14070 #ifdef CONFIG_SPARC
14071 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14073 struct net_device *dev = tp->dev;
14074 struct pci_dev *pdev = tp->pdev;
14075 struct device_node *dp = pci_device_to_OF_node(pdev);
14076 const unsigned char *addr;
14079 addr = of_get_property(dp, "local-mac-address", &len);
14080 if (addr && len == 6) {
14081 memcpy(dev->dev_addr, addr, 6);
14082 memcpy(dev->perm_addr, dev->dev_addr, 6);
14088 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14090 struct net_device *dev = tp->dev;
14092 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14093 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14098 static int __devinit tg3_get_device_address(struct tg3 *tp)
14100 struct net_device *dev = tp->dev;
14101 u32 hi, lo, mac_offset;
14104 #ifdef CONFIG_SPARC
14105 if (!tg3_get_macaddr_sparc(tp))
14110 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14111 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
14112 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14114 if (tg3_nvram_lock(tp))
14115 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14117 tg3_nvram_unlock(tp);
14118 } else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14119 if (PCI_FUNC(tp->pdev->devfn) & 1)
14121 if (PCI_FUNC(tp->pdev->devfn) > 1)
14122 mac_offset += 0x18c;
14123 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14126 /* First try to get it from MAC address mailbox. */
14127 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14128 if ((hi >> 16) == 0x484b) {
14129 dev->dev_addr[0] = (hi >> 8) & 0xff;
14130 dev->dev_addr[1] = (hi >> 0) & 0xff;
14132 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14133 dev->dev_addr[2] = (lo >> 24) & 0xff;
14134 dev->dev_addr[3] = (lo >> 16) & 0xff;
14135 dev->dev_addr[4] = (lo >> 8) & 0xff;
14136 dev->dev_addr[5] = (lo >> 0) & 0xff;
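/* Worked example: the upper 16 bits of the high mailbox word appear to act
 * as a validity signature (0x484b is ASCII "HK"). If the bootcode left
 * hi = 0x484b0011 and lo = 0x22334455, the assignments above produce the
 * MAC address 00:11:22:33:44:55.
 */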
14138 /* Some old bootcode may report a 0 MAC address in SRAM */
14139 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14142 /* Next, try NVRAM. */
14143 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
14144 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14145 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14146 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14147 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14149 /* Finally just fetch it out of the MAC control regs. */
14151 hi = tr32(MAC_ADDR_0_HIGH);
14152 lo = tr32(MAC_ADDR_0_LOW);
14154 dev->dev_addr[5] = lo & 0xff;
14155 dev->dev_addr[4] = (lo >> 8) & 0xff;
14156 dev->dev_addr[3] = (lo >> 16) & 0xff;
14157 dev->dev_addr[2] = (lo >> 24) & 0xff;
14158 dev->dev_addr[1] = hi & 0xff;
14159 dev->dev_addr[0] = (hi >> 8) & 0xff;
14163 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14164 #ifdef CONFIG_SPARC
14165 if (!tg3_get_default_macaddr_sparc(tp))
14170 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14174 #define BOUNDARY_SINGLE_CACHELINE 1
14175 #define BOUNDARY_MULTI_CACHELINE 2
14177 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14179 int cacheline_size;
14183 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14185 cacheline_size = 1024;
14187 cacheline_size = (int) byte * 4;
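/* Worked example: PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, so a
 * register value of 0x10 corresponds to a 64-byte cache line (0x10 * 4);
 * a value of 0 (cache line size not programmed) is treated as 1024 bytes
 * above.
 */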
14189 /* On 5703 and later chips, the boundary bits have no effect.
14192 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14193 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14194 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
14197 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14198 goal = BOUNDARY_MULTI_CACHELINE;
14200 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14201 goal = BOUNDARY_SINGLE_CACHELINE;
14207 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14208 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14215 /* PCI controllers on most RISC systems tend to disconnect
14216 * when a device tries to burst across a cache-line boundary.
14217 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14219 * Unfortunately, for PCI-E there are only limited
14220 * write-side controls for this, and thus for reads
14221 * we will still get the disconnects. We'll also waste
14222 * these PCI cycles for both read and write for chips
14223 * other than 5700 and 5701 which do not implement the boundary bits.
14226 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14227 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14228 switch (cacheline_size) {
14233 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14234 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14235 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14237 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14238 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14243 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14244 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14248 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14249 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14252 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14253 switch (cacheline_size) {
14257 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14258 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14259 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14265 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14266 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14270 switch (cacheline_size) {
14272 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14273 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14274 DMA_RWCTRL_WRITE_BNDRY_16);
14279 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14280 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14281 DMA_RWCTRL_WRITE_BNDRY_32);
14286 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14287 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14288 DMA_RWCTRL_WRITE_BNDRY_64);
14293 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14294 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14295 DMA_RWCTRL_WRITE_BNDRY_128);
14300 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14301 DMA_RWCTRL_WRITE_BNDRY_256);
14304 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14305 DMA_RWCTRL_WRITE_BNDRY_512);
14309 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14310 DMA_RWCTRL_WRITE_BNDRY_1024);
14319 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14321 struct tg3_internal_buffer_desc test_desc;
14322 u32 sram_dma_descs;
14325 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14327 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14328 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14329 tw32(RDMAC_STATUS, 0);
14330 tw32(WDMAC_STATUS, 0);
14332 tw32(BUFMGR_MODE, 0);
14333 tw32(FTQ_RESET, 0);
14335 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14336 test_desc.addr_lo = buf_dma & 0xffffffff;
14337 test_desc.nic_mbuf = 0x00002100;
14338 test_desc.len = size;
14341 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14342 * the *second* time the tg3 driver was getting loaded after an initial scan.
14345 * Broadcom tells me:
14346 * ...the DMA engine is connected to the GRC block and a DMA
14347 * reset may affect the GRC block in some unpredictable way...
14348 * The behavior of resets to individual blocks has not been tested.
14350 * Broadcom noted the GRC reset will also reset all sub-components.
14353 test_desc.cqid_sqid = (13 << 8) | 2;
14355 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14358 test_desc.cqid_sqid = (16 << 8) | 7;
14360 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14363 test_desc.flags = 0x00000005;
14365 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14368 val = *(((u32 *)&test_desc) + i);
14369 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14370 sram_dma_descs + (i * sizeof(u32)));
14371 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14373 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14376 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14378 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14381 for (i = 0; i < 40; i++) {
14385 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14387 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14388 if ((val & 0xffff) == sram_dma_descs) {
14399 #define TEST_BUFFER_SIZE 0x2000
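/* Note: the 0x2000-byte (8 KiB) test buffer holds 2048 32-bit words, which
 * is why the fill and compare loops below iterate
 * TEST_BUFFER_SIZE / sizeof(u32) times.
 */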
14401 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14402 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14406 static int __devinit tg3_test_dma(struct tg3 *tp)
14408 dma_addr_t buf_dma;
14409 u32 *buf, saved_dma_rwctrl;
14412 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14413 &buf_dma, GFP_KERNEL);
14419 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14420 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14422 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14424 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
14427 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14428 /* DMA read watermark not used on PCIE */
14429 tp->dma_rwctrl |= 0x00180000;
14430 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14433 tp->dma_rwctrl |= 0x003f0000;
14435 tp->dma_rwctrl |= 0x003f000f;
14437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14439 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14440 u32 read_water = 0x7;
14442 /* If the 5704 is behind the EPB bridge, we can
14443 * do the less restrictive ONE_DMA workaround for
14444 * better performance.
14446 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14448 tp->dma_rwctrl |= 0x8000;
14449 else if (ccval == 0x6 || ccval == 0x7)
14450 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14454 /* Set bit 23 to enable PCIX hw bug fix */
14456 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14457 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14459 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14460 /* 5780 always in PCIX mode */
14461 tp->dma_rwctrl |= 0x00144000;
14462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14463 /* 5714 always in PCIX mode */
14464 tp->dma_rwctrl |= 0x00148000;
14466 tp->dma_rwctrl |= 0x001b000f;
14470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14472 tp->dma_rwctrl &= 0xfffffff0;
14474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14476 /* Remove this if it causes problems for some boards. */
14477 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14479 /* On 5700/5701 chips, we need to set this bit.
14480 * Otherwise the chip will issue cacheline transactions
14481 * to streamable DMA memory without all of the byte
14482 * enables turned on. This is an error on several
14483 * RISC PCI controllers, in particular sparc64.
14485 * On 5703/5704 chips, this bit has been reassigned
14486 * a different meaning. In particular, it is used
14487 * on those chips to enable a PCI-X workaround.
14489 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14492 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14495 /* Unneeded, already done by tg3_get_invariants. */
14496 tg3_switch_clocks(tp);
14499 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14500 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14503 /* It is best to perform the DMA test with the maximum write burst size
14504 * to expose the 5700/5701 write DMA bug.
14506 saved_dma_rwctrl = tp->dma_rwctrl;
14507 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14508 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14513 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14516 /* Send the buffer to the chip. */
14517 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14519 dev_err(&tp->pdev->dev,
14520 "%s: Buffer write failed. err = %d\n",
14526 /* Validate that the data reached card RAM correctly. */
14527 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14529 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14530 if (le32_to_cpu(val) != p[i]) {
14531 dev_err(&tp->pdev->dev,
14532 "%s: Buffer corrupted on device! "
14533 "(%d != %d)\n", __func__, val, i);
14534 /* ret = -ENODEV here? */
14539 /* Now read it back. */
14540 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14542 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14543 "err = %d\n", __func__, ret);
14548 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14552 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14553 DMA_RWCTRL_WRITE_BNDRY_16) {
14554 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14555 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14556 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14559 dev_err(&tp->pdev->dev,
14560 "%s: Buffer corrupted on read back! "
14561 "(%d != %d)\n", __func__, p[i], i);
14567 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14573 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14574 DMA_RWCTRL_WRITE_BNDRY_16) {
14576 /* DMA test passed without adjusting the DMA boundary;
14577 * now look for chipsets that are known to expose the
14578 * DMA bug without failing the test.
14580 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14581 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14582 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14584 /* Safe to use the calculated DMA boundary. */
14585 tp->dma_rwctrl = saved_dma_rwctrl;
14588 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14592 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14597 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14599 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14600 tp->bufmgr_config.mbuf_read_dma_low_water =
14601 DEFAULT_MB_RDMA_LOW_WATER_5705;
14602 tp->bufmgr_config.mbuf_mac_rx_low_water =
14603 DEFAULT_MB_MACRX_LOW_WATER_57765;
14604 tp->bufmgr_config.mbuf_high_water =
14605 DEFAULT_MB_HIGH_WATER_57765;
14607 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14608 DEFAULT_MB_RDMA_LOW_WATER_5705;
14609 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14610 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14611 tp->bufmgr_config.mbuf_high_water_jumbo =
14612 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14613 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14614 tp->bufmgr_config.mbuf_read_dma_low_water =
14615 DEFAULT_MB_RDMA_LOW_WATER_5705;
14616 tp->bufmgr_config.mbuf_mac_rx_low_water =
14617 DEFAULT_MB_MACRX_LOW_WATER_5705;
14618 tp->bufmgr_config.mbuf_high_water =
14619 DEFAULT_MB_HIGH_WATER_5705;
14620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14621 tp->bufmgr_config.mbuf_mac_rx_low_water =
14622 DEFAULT_MB_MACRX_LOW_WATER_5906;
14623 tp->bufmgr_config.mbuf_high_water =
14624 DEFAULT_MB_HIGH_WATER_5906;
14627 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14628 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14629 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14630 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14631 tp->bufmgr_config.mbuf_high_water_jumbo =
14632 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14634 tp->bufmgr_config.mbuf_read_dma_low_water =
14635 DEFAULT_MB_RDMA_LOW_WATER;
14636 tp->bufmgr_config.mbuf_mac_rx_low_water =
14637 DEFAULT_MB_MACRX_LOW_WATER;
14638 tp->bufmgr_config.mbuf_high_water =
14639 DEFAULT_MB_HIGH_WATER;
14641 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14642 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14643 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14644 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14645 tp->bufmgr_config.mbuf_high_water_jumbo =
14646 DEFAULT_MB_HIGH_WATER_JUMBO;
14649 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14650 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14653 static char * __devinit tg3_phy_string(struct tg3 *tp)
14655 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14656 case TG3_PHY_ID_BCM5400: return "5400";
14657 case TG3_PHY_ID_BCM5401: return "5401";
14658 case TG3_PHY_ID_BCM5411: return "5411";
14659 case TG3_PHY_ID_BCM5701: return "5701";
14660 case TG3_PHY_ID_BCM5703: return "5703";
14661 case TG3_PHY_ID_BCM5704: return "5704";
14662 case TG3_PHY_ID_BCM5705: return "5705";
14663 case TG3_PHY_ID_BCM5750: return "5750";
14664 case TG3_PHY_ID_BCM5752: return "5752";
14665 case TG3_PHY_ID_BCM5714: return "5714";
14666 case TG3_PHY_ID_BCM5780: return "5780";
14667 case TG3_PHY_ID_BCM5755: return "5755";
14668 case TG3_PHY_ID_BCM5787: return "5787";
14669 case TG3_PHY_ID_BCM5784: return "5784";
14670 case TG3_PHY_ID_BCM5756: return "5722/5756";
14671 case TG3_PHY_ID_BCM5906: return "5906";
14672 case TG3_PHY_ID_BCM5761: return "5761";
14673 case TG3_PHY_ID_BCM5718C: return "5718C";
14674 case TG3_PHY_ID_BCM5718S: return "5718S";
14675 case TG3_PHY_ID_BCM57765: return "57765";
14676 case TG3_PHY_ID_BCM5719C: return "5719C";
14677 case TG3_PHY_ID_BCM5720C: return "5720C";
14678 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14679 case 0: return "serdes";
14680 default: return "unknown";
14684 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14686 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14687 strcpy(str, "PCI Express");
14689 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14690 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14692 strcpy(str, "PCIX:");
14694 if ((clock_ctrl == 7) ||
14695 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14696 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14697 strcat(str, "133MHz");
14698 else if (clock_ctrl == 0)
14699 strcat(str, "33MHz");
14700 else if (clock_ctrl == 2)
14701 strcat(str, "50MHz");
14702 else if (clock_ctrl == 4)
14703 strcat(str, "66MHz");
14704 else if (clock_ctrl == 6)
14705 strcat(str, "100MHz");
14707 strcpy(str, "PCI:");
14708 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14709 strcat(str, "66MHz");
14711 strcat(str, "33MHz");
14713 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14714 strcat(str, ":32-bit");
14716 strcat(str, ":64-bit");
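/* Usage note: tg3_bus_string() fills "str" with a human-readable bus
 * description such as "PCI Express", "PCIX:133MHz:64-bit" or
 * "PCI:33MHz:32-bit"; tg3_init_one() below prints it in the probe banner.
 */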
14720 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14722 struct pci_dev *peer;
14723 unsigned int func, devnr = tp->pdev->devfn & ~7;
14725 for (func = 0; func < 8; func++) {
14726 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14727 if (peer && peer != tp->pdev)
14731 /* 5704 can be configured in single-port mode; set peer to
14732 * tp->pdev in that case.
14740 * We don't need to keep the refcount elevated; there's no way
14741 * to remove one half of this device without removing the other
14748 static void __devinit tg3_init_coal(struct tg3 *tp)
14750 struct ethtool_coalesce *ec = &tp->coal;
14752 memset(ec, 0, sizeof(*ec));
14753 ec->cmd = ETHTOOL_GCOALESCE;
14754 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14755 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14756 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14757 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14758 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14759 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14760 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14761 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14762 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14764 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14765 HOSTCC_MODE_CLRTICK_TXBD)) {
14766 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14767 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14768 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14769 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14772 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14773 ec->rx_coalesce_usecs_irq = 0;
14774 ec->tx_coalesce_usecs_irq = 0;
14775 ec->stats_block_coalesce_usecs = 0;
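/* Usage note: these defaults are what the ETHTOOL_GCOALESCE command
 * (e.g. "ethtool -c ethX", where ethX is a placeholder interface name)
 * reports until the user overrides them via ETHTOOL_SCOALESCE
 * ("ethtool -C").
 */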
14779 static const struct net_device_ops tg3_netdev_ops = {
14780 .ndo_open = tg3_open,
14781 .ndo_stop = tg3_close,
14782 .ndo_start_xmit = tg3_start_xmit,
14783 .ndo_get_stats64 = tg3_get_stats64,
14784 .ndo_validate_addr = eth_validate_addr,
14785 .ndo_set_multicast_list = tg3_set_rx_mode,
14786 .ndo_set_mac_address = tg3_set_mac_addr,
14787 .ndo_do_ioctl = tg3_ioctl,
14788 .ndo_tx_timeout = tg3_tx_timeout,
14789 .ndo_change_mtu = tg3_change_mtu,
14790 #ifdef CONFIG_NET_POLL_CONTROLLER
14791 .ndo_poll_controller = tg3_poll_controller,
14795 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14796 .ndo_open = tg3_open,
14797 .ndo_stop = tg3_close,
14798 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14799 .ndo_get_stats64 = tg3_get_stats64,
14800 .ndo_validate_addr = eth_validate_addr,
14801 .ndo_set_multicast_list = tg3_set_rx_mode,
14802 .ndo_set_mac_address = tg3_set_mac_addr,
14803 .ndo_do_ioctl = tg3_ioctl,
14804 .ndo_tx_timeout = tg3_tx_timeout,
14805 .ndo_change_mtu = tg3_change_mtu,
14806 #ifdef CONFIG_NET_POLL_CONTROLLER
14807 .ndo_poll_controller = tg3_poll_controller,
14811 static int __devinit tg3_init_one(struct pci_dev *pdev,
14812 const struct pci_device_id *ent)
14814 struct net_device *dev;
14816 int i, err, pm_cap;
14817 u32 sndmbx, rcvmbx, intmbx;
14819 u64 dma_mask, persist_dma_mask;
14821 printk_once(KERN_INFO "%s\n", version);
14823 err = pci_enable_device(pdev);
14825 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14829 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14831 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14832 goto err_out_disable_pdev;
14835 pci_set_master(pdev);
14837 /* Find power-management capability. */
14838 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14840 dev_err(&pdev->dev,
14841 "Cannot find Power Management capability, aborting\n");
14843 goto err_out_free_res;
14846 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14848 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14850 goto err_out_free_res;
14853 SET_NETDEV_DEV(dev, &pdev->dev);
14855 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14857 tp = netdev_priv(dev);
14860 tp->pm_cap = pm_cap;
14861 tp->rx_mode = TG3_DEF_RX_MODE;
14862 tp->tx_mode = TG3_DEF_TX_MODE;
14865 tp->msg_enable = tg3_debug;
14867 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14869 /* The word/byte swap controls here control register access byte
14870 * swapping. DMA data byte swapping is controlled in the GRC_MODE register.
14873 tp->misc_host_ctrl =
14874 MISC_HOST_CTRL_MASK_PCI_INT |
14875 MISC_HOST_CTRL_WORD_SWAP |
14876 MISC_HOST_CTRL_INDIR_ACCESS |
14877 MISC_HOST_CTRL_PCISTATE_RW;
14879 /* The NONFRM (non-frame) byte/word swap controls take effect
14880 * on descriptor entries, anything which isn't packet data.
14882 * The StrongARM chips on the board (one for tx, one for rx)
14883 * are running in big-endian mode.
14885 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14886 GRC_MODE_WSWAP_NONFRM_DATA);
14887 #ifdef __BIG_ENDIAN
14888 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14890 spin_lock_init(&tp->lock);
14891 spin_lock_init(&tp->indirect_lock);
14892 INIT_WORK(&tp->reset_task, tg3_reset_task);
14894 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14896 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14898 goto err_out_free_dev;
14901 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14902 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14904 dev->ethtool_ops = &tg3_ethtool_ops;
14905 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14906 dev->irq = pdev->irq;
14908 err = tg3_get_invariants(tp);
14910 dev_err(&pdev->dev,
14911 "Problem fetching invariants of chip, aborting\n");
14912 goto err_out_iounmap;
14915 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14916 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
14917 dev->netdev_ops = &tg3_netdev_ops;
14919 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14922 /* The EPB bridge inside 5714, 5715, and 5780 and any
14923 * device behind the EPB cannot support DMA addresses > 40-bit.
14924 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14925 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14926 * do DMA address check in tg3_start_xmit().
14928 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14929 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14930 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14931 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14932 #ifdef CONFIG_HIGHMEM
14933 dma_mask = DMA_BIT_MASK(64);
14936 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14938 /* Configure DMA attributes. */
14939 if (dma_mask > DMA_BIT_MASK(32)) {
14940 err = pci_set_dma_mask(pdev, dma_mask);
14942 dev->features |= NETIF_F_HIGHDMA;
14943 err = pci_set_consistent_dma_mask(pdev,
14946 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14947 "DMA for consistent allocations\n");
14948 goto err_out_iounmap;
14952 if (err || dma_mask == DMA_BIT_MASK(32)) {
14953 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14955 dev_err(&pdev->dev,
14956 "No usable DMA configuration, aborting\n");
14957 goto err_out_iounmap;
14961 tg3_init_bufmgr_config(tp);
14963 /* Selectively allow TSO based on operating conditions */
14964 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14965 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14966 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14968 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14969 tp->fw_needed = NULL;
14972 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14973 tp->fw_needed = FIRMWARE_TG3;
14975 /* TSO is on by default on chips that support hardware TSO.
14976 * Firmware TSO on older chips gives lower performance, so it
14977 * is off by default, but can be enabled using ethtool.
14979 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14980 (dev->features & NETIF_F_IP_CSUM)) {
14981 dev->features |= NETIF_F_TSO;
14982 vlan_features_add(dev, NETIF_F_TSO);
14984 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14985 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14986 if (dev->features & NETIF_F_IPV6_CSUM) {
14987 dev->features |= NETIF_F_TSO6;
14988 vlan_features_add(dev, NETIF_F_TSO6);
14990 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14992 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14993 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14996 dev->features |= NETIF_F_TSO_ECN;
14997 vlan_features_add(dev, NETIF_F_TSO_ECN);
15001 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15002 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
15003 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15004 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
15005 tp->rx_pending = 63;
15008 err = tg3_get_device_address(tp);
15010 dev_err(&pdev->dev,
15011 "Could not obtain valid ethernet address, aborting\n");
15012 goto err_out_iounmap;
15015 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
15016 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15017 if (!tp->aperegs) {
15018 dev_err(&pdev->dev,
15019 "Cannot map APE registers, aborting\n");
15021 goto err_out_iounmap;
15024 tg3_ape_lock_init(tp);
15026 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
15027 tg3_read_dash_ver(tp);
15031 * Reset the chip in case the UNDI or EFI driver did not shut it
15032 * down cleanly; the DMA self test will enable WDMAC and we'll see
15033 * (spurious) pending DMA on the PCI bus at that point.
15035 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15036 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15037 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15038 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15041 err = tg3_test_dma(tp);
15043 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15044 goto err_out_apeunmap;
15047 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15048 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15049 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15050 for (i = 0; i < tp->irq_max; i++) {
15051 struct tg3_napi *tnapi = &tp->napi[i];
15054 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15056 tnapi->int_mbox = intmbx;
15062 tnapi->consmbox = rcvmbx;
15063 tnapi->prodmbox = sndmbx;
15066 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15068 tnapi->coal_now = HOSTCC_MODE_NOW;
15070 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
15074 * If we support MSIX, we'll be using RSS. If we're using
15075 * RSS, the first vector only handles link interrupts and the
15076 * remaining vectors handle rx and tx interrupts. Reuse the
15077 * mailbox values for the next iteration. The values we set up
15078 * above are still useful for the single vectored mode.
15093 pci_set_drvdata(pdev, dev);
15095 err = register_netdev(dev);
15097 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15098 goto err_out_apeunmap;
15101 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15102 tp->board_part_number,
15103 tp->pci_chip_rev_id,
15104 tg3_bus_string(tp, str),
15107 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15108 struct phy_device *phydev;
15109 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15111 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15112 phydev->drv->name, dev_name(&phydev->dev));
15116 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15117 ethtype = "10/100Base-TX";
15118 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15119 ethtype = "1000Base-SX";
15121 ethtype = "10/100/1000Base-T";
15123 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15124 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
15125 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
15128 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15129 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
15130 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
15131 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15132 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
15133 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
15134 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15136 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15137 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15143 iounmap(tp->aperegs);
15144 tp->aperegs = NULL;
15157 pci_release_regions(pdev);
15159 err_out_disable_pdev:
15160 pci_disable_device(pdev);
15161 pci_set_drvdata(pdev, NULL);
15165 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15167 struct net_device *dev = pci_get_drvdata(pdev);
15170 struct tg3 *tp = netdev_priv(dev);
15173 release_firmware(tp->fw);
15175 cancel_work_sync(&tp->reset_task);
15177 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
15182 unregister_netdev(dev);
15184 iounmap(tp->aperegs);
15185 tp->aperegs = NULL;
15192 pci_release_regions(pdev);
15193 pci_disable_device(pdev);
15194 pci_set_drvdata(pdev, NULL);
15198 #ifdef CONFIG_PM_SLEEP
15199 static int tg3_suspend(struct device *device)
15201 struct pci_dev *pdev = to_pci_dev(device);
15202 struct net_device *dev = pci_get_drvdata(pdev);
15203 struct tg3 *tp = netdev_priv(dev);
15206 if (!netif_running(dev))
15209 flush_work_sync(&tp->reset_task);
15211 tg3_netif_stop(tp);
15213 del_timer_sync(&tp->timer);
15215 tg3_full_lock(tp, 1);
15216 tg3_disable_ints(tp);
15217 tg3_full_unlock(tp);
15219 netif_device_detach(dev);
15221 tg3_full_lock(tp, 0);
15222 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15223 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
15224 tg3_full_unlock(tp);
15226 err = tg3_power_down_prepare(tp);
15230 tg3_full_lock(tp, 0);
15232 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15233 err2 = tg3_restart_hw(tp, 1);
15237 tp->timer.expires = jiffies + tp->timer_offset;
15238 add_timer(&tp->timer);
15240 netif_device_attach(dev);
15241 tg3_netif_start(tp);
15244 tg3_full_unlock(tp);
15253 static int tg3_resume(struct device *device)
15255 struct pci_dev *pdev = to_pci_dev(device);
15256 struct net_device *dev = pci_get_drvdata(pdev);
15257 struct tg3 *tp = netdev_priv(dev);
15260 if (!netif_running(dev))
15263 netif_device_attach(dev);
15265 tg3_full_lock(tp, 0);
15267 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15268 err = tg3_restart_hw(tp, 1);
15272 tp->timer.expires = jiffies + tp->timer_offset;
15273 add_timer(&tp->timer);
15275 tg3_netif_start(tp);
15278 tg3_full_unlock(tp);
15286 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15287 #define TG3_PM_OPS (&tg3_pm_ops)
15291 #define TG3_PM_OPS NULL
15293 #endif /* CONFIG_PM_SLEEP */
15295 static struct pci_driver tg3_driver = {
15296 .name = DRV_MODULE_NAME,
15297 .id_table = tg3_pci_tbl,
15298 .probe = tg3_init_one,
15299 .remove = __devexit_p(tg3_remove_one),
15300 .driver.pm = TG3_PM_OPS,
15303 static int __init tg3_init(void)
15305 return pci_register_driver(&tg3_driver);
15308 static void __exit tg3_cleanup(void)
15310 pci_unregister_driver(&tg3_driver);
15313 module_init(tg3_init);
15314 module_exit(tg3_cleanup);