2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
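/* Example (illustrative): tg3_flag(tp, ENABLE_APE) expands to a
 * test_bit() of TG3_FLAG_ENABLE_APE in tp->tg3_flags, while
 * tg3_flag_set()/tg3_flag_clear() flip the same bit through the
 * bitop wrappers above.
 */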
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 123
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "March 21, 2012"
96 #define RESET_KIND_SHUTDOWN 0
97 #define RESET_KIND_INIT 1
98 #define RESET_KIND_SUSPEND 2
100 #define TG3_DEF_RX_MODE 0
101 #define TG3_DEF_TX_MODE 0
102 #define TG3_DEF_MSG_ENABLE \
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
114 /* length of time before we decide the hardware is borked,
115 * and dev->tx_timeout() should be called to fix the problem
118 #define TG3_TX_TIMEOUT (5 * HZ)
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU 60
122 #define TG3_MAX_MTU(tp) \
123 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126 * You can't change the ring sizes, but you can change where you place
127 * them in the NIC onboard memory.
129 #define TG3_RX_STD_RING_SIZE(tp) \
130 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING 200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 /* Do not place this n-ring entries value into the tp struct itself,
139 * we really want to expose these constants to GCC so that modulo et
140 * al. operations are done with shifts and masks instead of with
141 * hw multiply/modulo instructions. Another solution would be to
142 * replace things like '% foo' with '& (foo - 1)'.
145 #define TG3_TX_RING_SIZE 512
146 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
148 #define TG3_RX_STD_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
156 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
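/* NEXT_TX() depends on TG3_TX_RING_SIZE being a power of two, so the
 * wrap-around becomes a mask instead of a modulo; e.g. with a 512-entry
 * ring, NEXT_TX(511) == (511 + 1) & 511 == 0.
 */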
158 #define TG3_DMA_BYTE_ENAB 64
160 #define TG3_RX_STD_DMA_SZ 1536
161 #define TG3_RX_JMB_DMA_SZ 9046
163 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
165 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175 * that are at least dword aligned when used in PCIX mode. The driver
176 * works around this bug by double copying the packet. This workaround
177 * is built into the normal double copy length check for efficiency.
179 * However, the double copy is only necessary on those architectures
180 * where unaligned memory accesses are inefficient. For those architectures
181 * where unaligned memory accesses incur little penalty, we can reintegrate
182 * the 5701 in the normal rx path. Doing so saves a device structure
183 * dereference by hardcoding the double copy threshold in place.
185 #define TG3_RX_COPY_THRESHOLD 256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
189 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
195 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K 2048
201 #define TG3_TX_BD_DMA_MAX_4K 4096
203 #define TG3_RAW_IP_ALIGN 2
205 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
206 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208 #define FIRMWARE_TG3 "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314 static const struct {
315 const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
319 { "rx_ucast_packets" },
320 { "rx_mcast_packets" },
321 { "rx_bcast_packets" },
323 { "rx_align_errors" },
324 { "rx_xon_pause_rcvd" },
325 { "rx_xoff_pause_rcvd" },
326 { "rx_mac_ctrl_rcvd" },
327 { "rx_xoff_entered" },
328 { "rx_frame_too_long_errors" },
330 { "rx_undersize_packets" },
331 { "rx_in_length_errors" },
332 { "rx_out_length_errors" },
333 { "rx_64_or_less_octet_packets" },
334 { "rx_65_to_127_octet_packets" },
335 { "rx_128_to_255_octet_packets" },
336 { "rx_256_to_511_octet_packets" },
337 { "rx_512_to_1023_octet_packets" },
338 { "rx_1024_to_1522_octet_packets" },
339 { "rx_1523_to_2047_octet_packets" },
340 { "rx_2048_to_4095_octet_packets" },
341 { "rx_4096_to_8191_octet_packets" },
342 { "rx_8192_to_9022_octet_packets" },
349 { "tx_flow_control" },
351 { "tx_single_collisions" },
352 { "tx_mult_collisions" },
354 { "tx_excessive_collisions" },
355 { "tx_late_collisions" },
356 { "tx_collide_2times" },
357 { "tx_collide_3times" },
358 { "tx_collide_4times" },
359 { "tx_collide_5times" },
360 { "tx_collide_6times" },
361 { "tx_collide_7times" },
362 { "tx_collide_8times" },
363 { "tx_collide_9times" },
364 { "tx_collide_10times" },
365 { "tx_collide_11times" },
366 { "tx_collide_12times" },
367 { "tx_collide_13times" },
368 { "tx_collide_14times" },
369 { "tx_collide_15times" },
370 { "tx_ucast_packets" },
371 { "tx_mcast_packets" },
372 { "tx_bcast_packets" },
373 { "tx_carrier_sense_errors" },
377 { "dma_writeq_full" },
378 { "dma_write_prioq_full" },
382 { "rx_threshold_hit" },
384 { "dma_readq_full" },
385 { "dma_read_prioq_full" },
386 { "tx_comp_queue_full" },
388 { "ring_set_send_prod_index" },
389 { "ring_status_update" },
391 { "nic_avoided_irqs" },
392 { "nic_tx_threshold_hit" },
394 { "mbuf_lwm_thresh_hit" },
397 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
400 static const struct {
401 const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 { "nvram test (online) " },
404 { "link test (online) " },
405 { "register test (offline)" },
406 { "memory test (offline)" },
407 { "mac loopback test (offline)" },
408 { "phy loopback test (offline)" },
409 { "ext loopback test (offline)" },
410 { "interrupt test (offline)" },
413 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 writel(val, tp->regs + off);
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 return readl(tp->regs + off);
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 writel(val, tp->aperegs + off);
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 return readl(tp->aperegs + off);
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440 spin_lock_irqsave(&tp->indirect_lock, flags);
441 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 spin_unlock_irqrestore(&tp->indirect_lock, flags);
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 writel(val, tp->regs + off);
449 readl(tp->regs + off);
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 TG3_64BIT_REG_LOW, val);
473 if (off == TG3_RX_STD_PROD_IDX_REG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 TG3_64BIT_REG_LOW, val);
479 spin_lock_irqsave(&tp->indirect_lock, flags);
480 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 /* In indirect mode when disabling interrupts, we also need
485 * to clear the interrupt bit in the GRC local ctrl register.
487 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
513 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 /* Non-posted methods */
515 tp->write32(tp, off, val);
518 tg3_write32(tp, off, val);
523 /* Wait again after the read for the posted method to guarantee that
524 * the wait time is met.
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 tp->write32_mbox(tp, off, val);
533 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 tp->read32_mbox(tp, off);
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
539 void __iomem *mbox = tp->regs + off;
541 if (tg3_flag(tp, TXD_MBOX_HWBUG))
543 if (tg3_flag(tp, MBOX_WRITE_REORDER))
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 return readl(tp->regs + off + GRCMBOX_BASE);
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 writel(val, tp->regs + off + GRCMBOX_BASE);
557 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
563 #define tw32(reg, val) tp->write32(tp, reg, val)
564 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg) tp->read32(tp, reg)
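/* Illustrative use of the flushing variants: tw32_f() reads the register
 * back to flush the posted write, and tw32_wait_f() additionally delays
 * for the given number of microseconds, e.g.
 * tw32_wait_f(GRC_LOCAL_CTRL, val, TG3_GRC_LCLCTL_PWRSW_DELAY)
 * when toggling the GPIO power switch.
 */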
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
576 spin_lock_irqsave(&tp->indirect_lock, flags);
577 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
581 /* Always leave this as zero. */
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 tw32_f(TG3PCI_MEM_WIN_DATA, val);
587 /* Always leave this as zero. */
588 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
590 spin_unlock_irqrestore(&tp->indirect_lock, flags);
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
603 spin_lock_irqsave(&tp->indirect_lock, flags);
604 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
608 /* Always leave this as zero. */
609 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 *val = tr32(TG3PCI_MEM_WIN_DATA);
614 /* Always leave this as zero. */
615 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
617 spin_unlock_irqrestore(&tp->indirect_lock, flags);
620 static void tg3_ape_lock_init(struct tg3 *tp)
625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 regbase = TG3_APE_LOCK_GRANT;
628 regbase = TG3_APE_PER_LOCK_GRANT;
630 /* Make sure the driver hasn't any stale locks. */
631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
633 case TG3_APE_LOCK_PHY0:
634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
641 bit = APE_LOCK_GRANT_DRIVER;
643 bit = 1 << tp->pci_fn;
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
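/* APE locks use a request/grant handshake: write the requester bit to the
 * per-lock REQ register, poll the GRANT register for up to roughly 1 ms,
 * and revoke the request if the grant never arrives.
 */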
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
654 u32 status, req, gnt, bit;
656 if (!tg3_flag(tp, ENABLE_APE))
660 case TG3_APE_LOCK_GPIO:
661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 case TG3_APE_LOCK_GRC:
664 case TG3_APE_LOCK_MEM:
666 bit = APE_LOCK_REQ_DRIVER;
668 bit = 1 << tp->pci_fn;
674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 req = TG3_APE_LOCK_REQ;
676 gnt = TG3_APE_LOCK_GRANT;
678 req = TG3_APE_PER_LOCK_REQ;
679 gnt = TG3_APE_PER_LOCK_GRANT;
684 tg3_ape_write32(tp, req + off, bit);
686 /* Wait for up to 1 millisecond to acquire lock. */
687 for (i = 0; i < 100; i++) {
688 status = tg3_ape_read32(tp, gnt + off);
695 /* Revoke the lock request. */
696 tg3_ape_write32(tp, gnt + off, bit);
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
707 if (!tg3_flag(tp, ENABLE_APE))
711 case TG3_APE_LOCK_GPIO:
712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
714 case TG3_APE_LOCK_GRC:
715 case TG3_APE_LOCK_MEM:
717 bit = APE_LOCK_GRANT_DRIVER;
719 bit = 1 << tp->pci_fn;
725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 gnt = TG3_APE_LOCK_GRANT;
728 gnt = TG3_APE_PER_LOCK_GRANT;
730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
738 /* NCSI does not support APE events */
739 if (tg3_flag(tp, APE_HAS_NCSI))
742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 if (apedata != APE_SEG_SIG_MAGIC)
746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 if (!(apedata & APE_FW_STATUS_READY))
750 /* Wait for up to 1 millisecond for APE to service previous event. */
751 for (i = 0; i < 10; i++) {
752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 event | APE_EVENT_STATUS_EVENT_PENDING);
761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
778 if (!tg3_flag(tp, ENABLE_APE))
782 case RESET_KIND_INIT:
783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 APE_HOST_SEG_SIG_MAGIC);
785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 APE_HOST_SEG_LEN_MAGIC);
787 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 APE_HOST_BEHAV_NO_PHYLOCK);
793 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 TG3_APE_HOST_DRVR_STATE_START);
796 event = APE_EVENT_STATUS_STATE_START;
798 case RESET_KIND_SHUTDOWN:
799 /* With the interface we are currently using,
800 * APE does not track driver state. Wiping
801 * out the HOST SEGMENT SIGNATURE forces
802 * the APE to assume OS absent status.
804 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
806 if (device_may_wakeup(&tp->pdev->dev) &&
807 tg3_flag(tp, WOL_ENABLE)) {
808 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 TG3_APE_HOST_WOL_SPEED_AUTO);
810 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
812 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
814 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
816 event = APE_EVENT_STATUS_STATE_UNLOAD;
818 case RESET_KIND_SUSPEND:
819 event = APE_EVENT_STATUS_STATE_SUSPEND;
825 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
827 tg3_ape_send_event(tp, event);
830 static void tg3_disable_ints(struct tg3 *tp)
834 tw32(TG3PCI_MISC_HOST_CTRL,
835 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 for (i = 0; i < tp->irq_max; i++)
837 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
840 static void tg3_enable_ints(struct tg3 *tp)
847 tw32(TG3PCI_MISC_HOST_CTRL,
848 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
850 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 for (i = 0; i < tp->irq_cnt; i++) {
852 struct tg3_napi *tnapi = &tp->napi[i];
854 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 if (tg3_flag(tp, 1SHOT_MSI))
856 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
858 tp->coal_now |= tnapi->coal_now;
861 /* Force an initial interrupt */
862 if (!tg3_flag(tp, TAGGED_STATUS) &&
863 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
866 tw32(HOSTCC_MODE, tp->coal_now);
868 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
873 struct tg3 *tp = tnapi->tp;
874 struct tg3_hw_status *sblk = tnapi->hw_status;
875 unsigned int work_exists = 0;
877 /* check for phy events */
878 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 if (sblk->status & SD_STATUS_LINK_CHG)
883 /* check for TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
887 /* check for RX work to do */
888 if (tnapi->rx_rcb_prod_idx &&
889 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
896 * similar to tg3_enable_ints, but it accurately determines whether there
897 * is new work pending and can return without flushing the PIO write
898 * which reenables interrupts
900 static void tg3_int_reenable(struct tg3_napi *tnapi)
902 struct tg3 *tp = tnapi->tp;
904 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
907 /* When doing tagged status, this work check is unnecessary.
908 * The last_tag we write above tells the chip which piece of
909 * work we've completed.
911 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
912 tw32(HOSTCC_MODE, tp->coalesce_mode |
913 HOSTCC_MODE_ENABLE | tnapi->coal_now);
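/* Adjust the core clock selection in TG3PCI_CLOCK_CTRL.  Devices with a
 * CPMU and 5780-class devices return early and are left untouched.
 */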
916 static void tg3_switch_clocks(struct tg3 *tp)
921 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
924 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
926 orig_clock_ctrl = clock_ctrl;
927 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
928 CLOCK_CTRL_CLKRUN_OENABLE |
930 tp->pci_clock_ctrl = clock_ctrl;
932 if (tg3_flag(tp, 5705_PLUS)) {
933 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
934 tw32_wait_f(TG3PCI_CLOCK_CTRL,
935 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
937 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
938 tw32_wait_f(TG3PCI_CLOCK_CTRL,
940 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
942 tw32_wait_f(TG3PCI_CLOCK_CTRL,
943 clock_ctrl | (CLOCK_CTRL_ALTCLK),
946 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
949 #define PHY_BUSY_LOOPS 5000
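/* MII accesses go through the MAC's MI_COM register: the PHY address,
 * register number and command are packed into a frame, the frame is
 * written to MAC_MI_COM, and MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS
 * iterations) until the transaction completes.
 */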
951 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
957 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
959 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
965 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
966 MI_COM_PHY_ADDR_MASK);
967 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
968 MI_COM_REG_ADDR_MASK);
969 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
971 tw32_f(MAC_MI_COM, frame_val);
973 loops = PHY_BUSY_LOOPS;
976 frame_val = tr32(MAC_MI_COM);
978 if ((frame_val & MI_COM_BUSY) == 0) {
980 frame_val = tr32(MAC_MI_COM);
988 *val = frame_val & MI_COM_DATA_MASK;
992 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
993 tw32_f(MAC_MI_MODE, tp->mi_mode);
1000 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1006 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1007 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1010 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1012 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1016 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1017 MI_COM_PHY_ADDR_MASK);
1018 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1019 MI_COM_REG_ADDR_MASK);
1020 frame_val |= (val & MI_COM_DATA_MASK);
1021 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1023 tw32_f(MAC_MI_COM, frame_val);
1025 loops = PHY_BUSY_LOOPS;
1026 while (loops != 0) {
1028 frame_val = tr32(MAC_MI_COM);
1029 if ((frame_val & MI_COM_BUSY) == 0) {
1031 frame_val = tr32(MAC_MI_COM);
1041 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1042 tw32_f(MAC_MI_MODE, tp->mi_mode);
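/* Clause 45 registers are reached indirectly via the clause 22 MMD access
 * registers: select the device address in MII_TG3_MMD_CTRL, write the
 * register address to MII_TG3_MMD_ADDRESS, then switch the control
 * register to data (no post-increment) mode for the actual data access.
 */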
1049 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1053 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1057 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1061 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1062 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1066 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1072 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1076 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1080 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1084 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1085 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1089 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1095 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1099 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1101 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1106 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1110 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1112 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1117 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1121 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1122 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1123 MII_TG3_AUXCTL_SHDWSEL_MISC);
1125 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1130 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1132 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1133 set |= MII_TG3_AUXCTL_MISC_WREN;
1135 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1138 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1139 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1141 MII_TG3_AUXCTL_ACTL_TX_6DB)
1143 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1144 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1145 MII_TG3_AUXCTL_ACTL_TX_6DB);
1147 static int tg3_bmcr_reset(struct tg3 *tp)
1152 /* OK, reset it, and poll the BMCR_RESET bit until it
1153 * clears or we time out.
1155 phy_control = BMCR_RESET;
1156 err = tg3_writephy(tp, MII_BMCR, phy_control);
1162 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1166 if ((phy_control & BMCR_RESET) == 0) {
1178 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1180 struct tg3 *tp = bp->priv;
1183 spin_lock_bh(&tp->lock);
1185 if (tg3_readphy(tp, reg, &val))
1188 spin_unlock_bh(&tp->lock);
1193 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1195 struct tg3 *tp = bp->priv;
1198 spin_lock_bh(&tp->lock);
1200 if (tg3_writephy(tp, reg, val))
1203 spin_unlock_bh(&tp->lock);
1208 static int tg3_mdio_reset(struct mii_bus *bp)
1213 static void tg3_mdio_config_5785(struct tg3 *tp)
1216 struct phy_device *phydev;
1218 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1219 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1220 case PHY_ID_BCM50610:
1221 case PHY_ID_BCM50610M:
1222 val = MAC_PHYCFG2_50610_LED_MODES;
1224 case PHY_ID_BCMAC131:
1225 val = MAC_PHYCFG2_AC131_LED_MODES;
1227 case PHY_ID_RTL8211C:
1228 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1230 case PHY_ID_RTL8201E:
1231 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1237 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1238 tw32(MAC_PHYCFG2, val);
1240 val = tr32(MAC_PHYCFG1);
1241 val &= ~(MAC_PHYCFG1_RGMII_INT |
1242 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1243 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1244 tw32(MAC_PHYCFG1, val);
1249 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1250 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1251 MAC_PHYCFG2_FMODE_MASK_MASK |
1252 MAC_PHYCFG2_GMODE_MASK_MASK |
1253 MAC_PHYCFG2_ACT_MASK_MASK |
1254 MAC_PHYCFG2_QUAL_MASK_MASK |
1255 MAC_PHYCFG2_INBAND_ENABLE;
1257 tw32(MAC_PHYCFG2, val);
1259 val = tr32(MAC_PHYCFG1);
1260 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1261 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1262 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1263 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1264 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1265 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1266 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1268 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1269 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1270 tw32(MAC_PHYCFG1, val);
1272 val = tr32(MAC_EXT_RGMII_MODE);
1273 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1274 MAC_RGMII_MODE_RX_QUALITY |
1275 MAC_RGMII_MODE_RX_ACTIVITY |
1276 MAC_RGMII_MODE_RX_ENG_DET |
1277 MAC_RGMII_MODE_TX_ENABLE |
1278 MAC_RGMII_MODE_TX_LOWPWR |
1279 MAC_RGMII_MODE_TX_RESET);
1280 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1281 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1282 val |= MAC_RGMII_MODE_RX_INT_B |
1283 MAC_RGMII_MODE_RX_QUALITY |
1284 MAC_RGMII_MODE_RX_ACTIVITY |
1285 MAC_RGMII_MODE_RX_ENG_DET;
1286 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1287 val |= MAC_RGMII_MODE_TX_ENABLE |
1288 MAC_RGMII_MODE_TX_LOWPWR |
1289 MAC_RGMII_MODE_TX_RESET;
1291 tw32(MAC_EXT_RGMII_MODE, val);
1294 static void tg3_mdio_start(struct tg3 *tp)
1296 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1297 tw32_f(MAC_MI_MODE, tp->mi_mode);
1300 if (tg3_flag(tp, MDIOBUS_INITED) &&
1301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1302 tg3_mdio_config_5785(tp);
1305 static int tg3_mdio_init(struct tg3 *tp)
1309 struct phy_device *phydev;
1311 if (tg3_flag(tp, 5717_PLUS)) {
1314 tp->phy_addr = tp->pci_fn + 1;
1316 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1317 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1319 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1320 TG3_CPMU_PHY_STRAP_IS_SERDES;
1324 tp->phy_addr = TG3_PHY_MII_ADDR;
1328 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1331 tp->mdio_bus = mdiobus_alloc();
1332 if (tp->mdio_bus == NULL)
1335 tp->mdio_bus->name = "tg3 mdio bus";
1336 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1337 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1338 tp->mdio_bus->priv = tp;
1339 tp->mdio_bus->parent = &tp->pdev->dev;
1340 tp->mdio_bus->read = &tg3_mdio_read;
1341 tp->mdio_bus->write = &tg3_mdio_write;
1342 tp->mdio_bus->reset = &tg3_mdio_reset;
1343 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1344 tp->mdio_bus->irq = &tp->mdio_irq[0];
1346 for (i = 0; i < PHY_MAX_ADDR; i++)
1347 tp->mdio_bus->irq[i] = PHY_POLL;
1349 /* The bus registration will look for all the PHYs on the mdio bus.
1350 * Unfortunately, it does not ensure the PHY is powered up before
1351 * accessing the PHY ID registers. A chip reset is the
1352 * quickest way to bring the device back to an operational state..
1354 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1357 i = mdiobus_register(tp->mdio_bus);
1359 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1360 mdiobus_free(tp->mdio_bus);
1364 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1366 if (!phydev || !phydev->drv) {
1367 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1368 mdiobus_unregister(tp->mdio_bus);
1369 mdiobus_free(tp->mdio_bus);
1373 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1374 case PHY_ID_BCM57780:
1375 phydev->interface = PHY_INTERFACE_MODE_GMII;
1376 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1378 case PHY_ID_BCM50610:
1379 case PHY_ID_BCM50610M:
1380 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1381 PHY_BRCM_RX_REFCLK_UNUSED |
1382 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1383 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1384 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1385 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1386 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1387 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1388 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1389 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1391 case PHY_ID_RTL8211C:
1392 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1394 case PHY_ID_RTL8201E:
1395 case PHY_ID_BCMAC131:
1396 phydev->interface = PHY_INTERFACE_MODE_MII;
1397 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1398 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1402 tg3_flag_set(tp, MDIOBUS_INITED);
1404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1405 tg3_mdio_config_5785(tp);
1410 static void tg3_mdio_fini(struct tg3 *tp)
1412 if (tg3_flag(tp, MDIOBUS_INITED)) {
1413 tg3_flag_clear(tp, MDIOBUS_INITED);
1414 mdiobus_unregister(tp->mdio_bus);
1415 mdiobus_free(tp->mdio_bus);
1419 /* tp->lock is held. */
1420 static inline void tg3_generate_fw_event(struct tg3 *tp)
1424 val = tr32(GRC_RX_CPU_EVENT);
1425 val |= GRC_RX_CPU_DRIVER_EVENT;
1426 tw32_f(GRC_RX_CPU_EVENT, val);
1428 tp->last_event_jiffies = jiffies;
1431 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1433 /* tp->lock is held. */
1434 static void tg3_wait_for_event_ack(struct tg3 *tp)
1437 unsigned int delay_cnt;
1440 /* If enough time has passed, no wait is necessary. */
1441 time_remain = (long)(tp->last_event_jiffies + 1 +
1442 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1444 if (time_remain < 0)
1447 /* Check if we can shorten the wait time. */
1448 delay_cnt = jiffies_to_usecs(time_remain);
1449 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1450 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1451 delay_cnt = (delay_cnt >> 3) + 1;
1453 for (i = 0; i < delay_cnt; i++) {
1454 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1460 /* tp->lock is held. */
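/* Gather a snapshot of the basic PHY registers (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 and PHYADDR) into data[] for the firmware link-update
 * mailbox written by tg3_ump_link_report() below.
 */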
1461 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1466 if (!tg3_readphy(tp, MII_BMCR, &reg))
1468 if (!tg3_readphy(tp, MII_BMSR, &reg))
1469 val |= (reg & 0xffff);
1473 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1475 if (!tg3_readphy(tp, MII_LPA, &reg))
1476 val |= (reg & 0xffff);
1480 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1481 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1483 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1484 val |= (reg & 0xffff);
1488 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1495 /* tp->lock is held. */
1496 static void tg3_ump_link_report(struct tg3 *tp)
1500 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1503 tg3_phy_gather_ump_data(tp, data);
1505 tg3_wait_for_event_ack(tp);
1507 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1509 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1510 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1511 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1512 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1514 tg3_generate_fw_event(tp);
1517 /* tp->lock is held. */
1518 static void tg3_stop_fw(struct tg3 *tp)
1520 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1521 /* Wait for RX cpu to ACK the previous event. */
1522 tg3_wait_for_event_ack(tp);
1524 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1526 tg3_generate_fw_event(tp);
1528 /* Wait for RX cpu to ACK this event. */
1529 tg3_wait_for_event_ack(tp);
1533 /* tp->lock is held. */
1534 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1536 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1537 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1539 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1541 case RESET_KIND_INIT:
1542 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1546 case RESET_KIND_SHUTDOWN:
1547 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1551 case RESET_KIND_SUSPEND:
1552 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1561 if (kind == RESET_KIND_INIT ||
1562 kind == RESET_KIND_SUSPEND)
1563 tg3_ape_driver_state_change(tp, kind);
1566 /* tp->lock is held. */
1567 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1569 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1571 case RESET_KIND_INIT:
1572 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573 DRV_STATE_START_DONE);
1576 case RESET_KIND_SHUTDOWN:
1577 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1578 DRV_STATE_UNLOAD_DONE);
1586 if (kind == RESET_KIND_SHUTDOWN)
1587 tg3_ape_driver_state_change(tp, kind);
1590 /* tp->lock is held. */
1591 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1593 if (tg3_flag(tp, ENABLE_ASF)) {
1595 case RESET_KIND_INIT:
1596 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1600 case RESET_KIND_SHUTDOWN:
1601 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1605 case RESET_KIND_SUSPEND:
1606 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1616 static int tg3_poll_fw(struct tg3 *tp)
1621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1622 /* Wait up to 20ms for init done. */
1623 for (i = 0; i < 200; i++) {
1624 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1631 /* Wait for firmware initialization to complete. */
1632 for (i = 0; i < 100000; i++) {
1633 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1634 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1639 /* Chip might not be fitted with firmware. Some Sun onboard
1640 * parts are configured like that. So don't signal the timeout
1641 * of the above loop as an error, but do report the lack of
1642 * running firmware once.
1644 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1645 tg3_flag_set(tp, NO_FWARE_REPORTED);
1647 netdev_info(tp->dev, "No firmware running\n");
1650 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1651 /* The 57765 A0 needs a little more
1652 * time to do some important work.
1660 static void tg3_link_report(struct tg3 *tp)
1662 if (!netif_carrier_ok(tp->dev)) {
1663 netif_info(tp, link, tp->dev, "Link is down\n");
1664 tg3_ump_link_report(tp);
1665 } else if (netif_msg_link(tp)) {
1666 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1667 (tp->link_config.active_speed == SPEED_1000 ?
1669 (tp->link_config.active_speed == SPEED_100 ?
1671 (tp->link_config.active_duplex == DUPLEX_FULL ?
1674 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1675 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1677 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1680 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1681 netdev_info(tp->dev, "EEE is %s\n",
1682 tp->setlpicnt ? "enabled" : "disabled");
1684 tg3_ump_link_report(tp);
1688 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1692 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1693 miireg = ADVERTISE_1000XPAUSE;
1694 else if (flow_ctrl & FLOW_CTRL_TX)
1695 miireg = ADVERTISE_1000XPSE_ASYM;
1696 else if (flow_ctrl & FLOW_CTRL_RX)
1697 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
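/* Resolve the negotiated 1000BASE-X pause configuration from the local and
 * link-partner advertisements, along the lines of the symmetric/asymmetric
 * pause resolution rules in IEEE 802.3 Annex 28B.
 */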
1704 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1708 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1709 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1710 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1711 if (lcladv & ADVERTISE_1000XPAUSE)
1713 if (rmtadv & ADVERTISE_1000XPAUSE)
1720 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1724 u32 old_rx_mode = tp->rx_mode;
1725 u32 old_tx_mode = tp->tx_mode;
1727 if (tg3_flag(tp, USE_PHYLIB))
1728 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1730 autoneg = tp->link_config.autoneg;
1732 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1733 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1734 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1736 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1738 flowctrl = tp->link_config.flowctrl;
1740 tp->link_config.active_flowctrl = flowctrl;
1742 if (flowctrl & FLOW_CTRL_RX)
1743 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1745 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1747 if (old_rx_mode != tp->rx_mode)
1748 tw32_f(MAC_RX_MODE, tp->rx_mode);
1750 if (flowctrl & FLOW_CTRL_TX)
1751 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1753 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1755 if (old_tx_mode != tp->tx_mode)
1756 tw32_f(MAC_TX_MODE, tp->tx_mode);
1759 static void tg3_adjust_link(struct net_device *dev)
1761 u8 oldflowctrl, linkmesg = 0;
1762 u32 mac_mode, lcl_adv, rmt_adv;
1763 struct tg3 *tp = netdev_priv(dev);
1764 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1766 spin_lock_bh(&tp->lock);
1768 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1769 MAC_MODE_HALF_DUPLEX);
1771 oldflowctrl = tp->link_config.active_flowctrl;
1777 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1778 mac_mode |= MAC_MODE_PORT_MODE_MII;
1779 else if (phydev->speed == SPEED_1000 ||
1780 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1781 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1783 mac_mode |= MAC_MODE_PORT_MODE_MII;
1785 if (phydev->duplex == DUPLEX_HALF)
1786 mac_mode |= MAC_MODE_HALF_DUPLEX;
1788 lcl_adv = mii_advertise_flowctrl(
1789 tp->link_config.flowctrl);
1792 rmt_adv = LPA_PAUSE_CAP;
1793 if (phydev->asym_pause)
1794 rmt_adv |= LPA_PAUSE_ASYM;
1797 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1799 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1801 if (mac_mode != tp->mac_mode) {
1802 tp->mac_mode = mac_mode;
1803 tw32_f(MAC_MODE, tp->mac_mode);
1807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1808 if (phydev->speed == SPEED_10)
1810 MAC_MI_STAT_10MBPS_MODE |
1811 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1813 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1816 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1817 tw32(MAC_TX_LENGTHS,
1818 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819 (6 << TX_LENGTHS_IPG_SHIFT) |
1820 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822 tw32(MAC_TX_LENGTHS,
1823 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824 (6 << TX_LENGTHS_IPG_SHIFT) |
1825 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1827 if (phydev->link != tp->old_link ||
1828 phydev->speed != tp->link_config.active_speed ||
1829 phydev->duplex != tp->link_config.active_duplex ||
1830 oldflowctrl != tp->link_config.active_flowctrl)
1833 tp->old_link = phydev->link;
1834 tp->link_config.active_speed = phydev->speed;
1835 tp->link_config.active_duplex = phydev->duplex;
1837 spin_unlock_bh(&tp->lock);
1840 tg3_link_report(tp);
1843 static int tg3_phy_init(struct tg3 *tp)
1845 struct phy_device *phydev;
1847 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1850 /* Bring the PHY back to a known state. */
1853 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1855 /* Attach the MAC to the PHY. */
1856 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1857 phydev->dev_flags, phydev->interface);
1858 if (IS_ERR(phydev)) {
1859 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1860 return PTR_ERR(phydev);
1863 /* Mask with MAC supported features. */
1864 switch (phydev->interface) {
1865 case PHY_INTERFACE_MODE_GMII:
1866 case PHY_INTERFACE_MODE_RGMII:
1867 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1868 phydev->supported &= (PHY_GBIT_FEATURES |
1870 SUPPORTED_Asym_Pause);
1874 case PHY_INTERFACE_MODE_MII:
1875 phydev->supported &= (PHY_BASIC_FEATURES |
1877 SUPPORTED_Asym_Pause);
1880 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1884 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1886 phydev->advertising = phydev->supported;
1891 static void tg3_phy_start(struct tg3 *tp)
1893 struct phy_device *phydev;
1895 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1898 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1900 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1901 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1902 phydev->speed = tp->link_config.speed;
1903 phydev->duplex = tp->link_config.duplex;
1904 phydev->autoneg = tp->link_config.autoneg;
1905 phydev->advertising = tp->link_config.advertising;
1910 phy_start_aneg(phydev);
1913 static void tg3_phy_stop(struct tg3 *tp)
1915 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1918 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921 static void tg3_phy_fini(struct tg3 *tp)
1923 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1924 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1925 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1929 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1934 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1937 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1938 /* Cannot do read-modify-write on 5401 */
1939 err = tg3_phy_auxctl_write(tp,
1940 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1941 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1946 err = tg3_phy_auxctl_read(tp,
1947 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1951 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1952 err = tg3_phy_auxctl_write(tp,
1953 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1959 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1963 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1966 tg3_writephy(tp, MII_TG3_FET_TEST,
1967 phytest | MII_TG3_FET_SHADOW_EN);
1968 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
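/* Enable or disable the PHY's auto power-down (APD) feature.  FET-style
 * PHYs go through the shadow-register helper above; other PHYs use the
 * MII_TG3_MISC_SHDW scr5/APD shadow selects.
 */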
1979 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1983 if (!tg3_flag(tp, 5705_PLUS) ||
1984 (tg3_flag(tp, 5717_PLUS) &&
1985 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1988 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1989 tg3_phy_fet_toggle_apd(tp, enable);
1993 reg = MII_TG3_MISC_SHDW_WREN |
1994 MII_TG3_MISC_SHDW_SCR5_SEL |
1995 MII_TG3_MISC_SHDW_SCR5_LPED |
1996 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1997 MII_TG3_MISC_SHDW_SCR5_SDTL |
1998 MII_TG3_MISC_SHDW_SCR5_C125OE;
1999 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2000 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2002 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2005 reg = MII_TG3_MISC_SHDW_WREN |
2006 MII_TG3_MISC_SHDW_APD_SEL |
2007 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2009 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2011 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2014 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2018 if (!tg3_flag(tp, 5705_PLUS) ||
2019 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2022 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2025 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2026 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2028 tg3_writephy(tp, MII_TG3_FET_TEST,
2029 ephy | MII_TG3_FET_SHADOW_EN);
2030 if (!tg3_readphy(tp, reg, &phy)) {
2032 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2034 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035 tg3_writephy(tp, reg, phy);
2037 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2042 ret = tg3_phy_auxctl_read(tp,
2043 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2046 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2048 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049 tg3_phy_auxctl_write(tp,
2050 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2055 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2060 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2063 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2066 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2069 static void tg3_phy_apply_otp(struct tg3 *tp)
2078 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2081 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2082 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2083 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2085 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2086 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2087 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2089 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2090 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2091 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2093 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2094 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2096 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2097 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2099 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2100 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2101 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2103 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2106 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2110 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2115 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2116 current_link_up == 1 &&
2117 tp->link_config.active_duplex == DUPLEX_FULL &&
2118 (tp->link_config.active_speed == SPEED_100 ||
2119 tp->link_config.active_speed == SPEED_1000)) {
2122 if (tp->link_config.active_speed == SPEED_1000)
2123 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2125 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2127 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2129 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2130 TG3_CL45_D7_EEERES_STAT, &val);
2132 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2133 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2137 if (!tp->setlpicnt) {
2138 if (current_link_up == 1 &&
2139 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2144 val = tr32(TG3_CPMU_EEE_MODE);
2145 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2149 static void tg3_phy_eee_enable(struct tg3 *tp)
2153 if (tp->link_config.active_speed == SPEED_1000 &&
2154 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2156 tg3_flag(tp, 57765_CLASS)) &&
2157 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 val = MII_TG3_DSP_TAP26_ALNOKO |
2159 MII_TG3_DSP_TAP26_RMRXSTO;
2160 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2161 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2164 val = tr32(TG3_CPMU_EEE_MODE);
2165 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2168 static int tg3_wait_macro_done(struct tg3 *tp)
2175 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2176 if ((tmp32 & 0x1000) == 0)
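/* Write a known test pattern into each of the four DSP channels and read
 * it back; a mismatch marks the channel block bad and requests another PHY
 * reset through *resetp.
 */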
2186 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2188 static const u32 test_pat[4][6] = {
2189 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2190 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2191 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2192 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2196 for (chan = 0; chan < 4; chan++) {
2199 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2200 (chan * 0x2000) | 0x0200);
2201 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2203 for (i = 0; i < 6; i++)
2204 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2207 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2208 if (tg3_wait_macro_done(tp)) {
2213 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2214 (chan * 0x2000) | 0x0200);
2215 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2216 if (tg3_wait_macro_done(tp)) {
2221 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2222 if (tg3_wait_macro_done(tp)) {
2227 for (i = 0; i < 6; i += 2) {
2230 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2231 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2232 tg3_wait_macro_done(tp)) {
2238 if (low != test_pat[chan][i] ||
2239 high != test_pat[chan][i+1]) {
2240 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2241 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2242 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2252 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2256 for (chan = 0; chan < 4; chan++) {
2259 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2260 (chan * 0x2000) | 0x0200);
2261 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2262 for (i = 0; i < 6; i++)
2263 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2264 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2265 if (tg3_wait_macro_done(tp))
2272 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2274 u32 reg32, phy9_orig;
2275 int retries, do_phy_reset, err;
2281 err = tg3_bmcr_reset(tp);
2287 /* Disable transmitter and interrupt. */
2288 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2292 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2294 /* Set full-duplex, 1000 mbps. */
2295 tg3_writephy(tp, MII_BMCR,
2296 BMCR_FULLDPLX | BMCR_SPEED1000);
2298 /* Set to master mode. */
2299 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2302 tg3_writephy(tp, MII_CTRL1000,
2303 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2305 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2309 /* Block the PHY control access. */
2310 tg3_phydsp_write(tp, 0x8005, 0x0800);
2312 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2315 } while (--retries);
2317 err = tg3_phy_reset_chanpat(tp);
2321 tg3_phydsp_write(tp, 0x8005, 0x0000);
2323 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2324 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2326 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2328 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2330 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2332 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2339 /* This will reset the tigon3 PHY if there is no valid
2340 * link unless the FORCE argument is non-zero.
2342 static int tg3_phy_reset(struct tg3 *tp)
2347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2348 val = tr32(GRC_MISC_CFG);
2349 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2352 err = tg3_readphy(tp, MII_BMSR, &val);
2353 err |= tg3_readphy(tp, MII_BMSR, &val);
2357 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2358 netif_carrier_off(tp->dev);
2359 tg3_link_report(tp);
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2365 err = tg3_phy_reset_5703_4_5(tp);
2372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2373 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2374 cpmuctrl = tr32(TG3_CPMU_CTRL);
2375 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2377 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2380 err = tg3_bmcr_reset(tp);
2384 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2385 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2386 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2388 tw32(TG3_CPMU_CTRL, cpmuctrl);
2391 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2392 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2393 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2394 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2395 CPMU_LSPD_1000MB_MACCLK_12_5) {
2396 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2398 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2402 if (tg3_flag(tp, 5717_PLUS) &&
2403 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2406 tg3_phy_apply_otp(tp);
2408 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2409 tg3_phy_toggle_apd(tp, true);
2411 tg3_phy_toggle_apd(tp, false);
2414 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2415 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2416 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2417 tg3_phydsp_write(tp, 0x000a, 0x0323);
2418 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2421 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2422 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2426 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2427 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2428 tg3_phydsp_write(tp, 0x000a, 0x310b);
2429 tg3_phydsp_write(tp, 0x201f, 0x9506);
2430 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2431 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2433 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2434 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2436 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2437 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2438 tg3_writephy(tp, MII_TG3_TEST1,
2439 MII_TG3_TEST1_TRIM_EN | 0x4);
2441 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2443 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2447 /* Set Extended packet length bit (bit 14) on all chips that */
2448 /* support jumbo frames */
2449 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2450 /* Cannot do read-modify-write on 5401 */
2451 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2452 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2453 /* Set bit 14 with read-modify-write to preserve other bits */
2454 err = tg3_phy_auxctl_read(tp,
2455 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2457 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2458 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2461 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2462 * jumbo frame transmission.
2464 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2465 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2466 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2467 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2471 /* adjust output voltage */
2472 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2475 tg3_phy_toggle_automdix(tp, 1);
2476 tg3_phy_set_wirespeed(tp);
2480 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2481 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2482 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2483 TG3_GPIO_MSG_NEED_VAUX)
2484 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2485 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2486 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2487 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2488 (TG3_GPIO_MSG_DRVR_PRES << 12))
2490 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2491 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2492 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2493 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2494 (TG3_GPIO_MSG_NEED_VAUX << 12))
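/* Each PCI function owns a 4-bit nibble in the GPIO message word, hence
 * the shifts by 0/4/8/12 above and the "4 * tp->pci_fn" offset used in
 * tg3_set_function_status() below.
 */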
2496 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2501 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2502 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504 status = tr32(TG3_CPMU_DRV_STATUS);
2506 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2507 status &= ~(TG3_GPIO_MSG_MASK << shift);
2508 status |= (newstat << shift);
2510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2512 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514 tw32(TG3_CPMU_DRV_STATUS, status);
2516 return status >> TG3_APE_GPIO_MSG_SHIFT;
2519 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2521 if (!tg3_flag(tp, IS_NIC))
2524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2527 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2530 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2532 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533 TG3_GRC_LCLCTL_PWRSW_DELAY);
2535 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2537 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538 TG3_GRC_LCLCTL_PWRSW_DELAY);
2544 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2548 if (!tg3_flag(tp, IS_NIC) ||
2549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2553 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2555 tw32_wait_f(GRC_LOCAL_CTRL,
2556 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2557 TG3_GRC_LCLCTL_PWRSW_DELAY);
2559 tw32_wait_f(GRC_LOCAL_CTRL,
2561 TG3_GRC_LCLCTL_PWRSW_DELAY);
2563 tw32_wait_f(GRC_LOCAL_CTRL,
2564 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2565 TG3_GRC_LCLCTL_PWRSW_DELAY);
2568 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2570 if (!tg3_flag(tp, IS_NIC))
2573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2575 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2576 (GRC_LCLCTRL_GPIO_OE0 |
2577 GRC_LCLCTRL_GPIO_OE1 |
2578 GRC_LCLCTRL_GPIO_OE2 |
2579 GRC_LCLCTRL_GPIO_OUTPUT0 |
2580 GRC_LCLCTRL_GPIO_OUTPUT1),
2581 TG3_GRC_LCLCTL_PWRSW_DELAY);
2582 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2584 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2585 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2586 GRC_LCLCTRL_GPIO_OE1 |
2587 GRC_LCLCTRL_GPIO_OE2 |
2588 GRC_LCLCTRL_GPIO_OUTPUT0 |
2589 GRC_LCLCTRL_GPIO_OUTPUT1 |
2591 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592 TG3_GRC_LCLCTL_PWRSW_DELAY);
2594 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2595 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596 TG3_GRC_LCLCTL_PWRSW_DELAY);
2598 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2599 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2600 TG3_GRC_LCLCTL_PWRSW_DELAY);
2603 u32 grc_local_ctrl = 0;
2605 /* Workaround to prevent overdrawing Amps. */
2606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2607 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2608 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2610 TG3_GRC_LCLCTL_PWRSW_DELAY);
2613 /* On 5753 and variants, GPIO2 cannot be used. */
2614 no_gpio2 = tp->nic_sram_data_cfg &
2615 NIC_SRAM_DATA_CFG_NO_GPIO2;
2617 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2618 GRC_LCLCTRL_GPIO_OE1 |
2619 GRC_LCLCTRL_GPIO_OE2 |
2620 GRC_LCLCTRL_GPIO_OUTPUT1 |
2621 GRC_LCLCTRL_GPIO_OUTPUT2;
2623 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2624 GRC_LCLCTRL_GPIO_OUTPUT2);
2626 tw32_wait_f(GRC_LOCAL_CTRL,
2627 tp->grc_local_ctrl | grc_local_ctrl,
2628 TG3_GRC_LCLCTL_PWRSW_DELAY);
2630 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2632 tw32_wait_f(GRC_LOCAL_CTRL,
2633 tp->grc_local_ctrl | grc_local_ctrl,
2634 TG3_GRC_LCLCTL_PWRSW_DELAY);
2637 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2638 tw32_wait_f(GRC_LOCAL_CTRL,
2639 tp->grc_local_ctrl | grc_local_ctrl,
2640 TG3_GRC_LCLCTL_PWRSW_DELAY);
2645 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2649 /* Serialize power state transitions */
2650 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2653 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2654 msg = TG3_GPIO_MSG_NEED_VAUX;
2656 msg = tg3_set_function_status(tp, msg);
2658 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2661 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2662 tg3_pwrsrc_switch_to_vaux(tp);
2664 tg3_pwrsrc_die_with_vmain(tp);
2667 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2670 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2672 bool need_vaux = false;
2674 /* The GPIOs do something completely different on 57765. */
2675 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2681 tg3_frob_aux_power_5717(tp, include_wol ?
2682 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2686 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2687 struct net_device *dev_peer;
2689 dev_peer = pci_get_drvdata(tp->pdev_peer);
2691 /* remove_one() may have been run on the peer. */
2693 struct tg3 *tp_peer = netdev_priv(dev_peer);
2695 if (tg3_flag(tp_peer, INIT_COMPLETE))
2698 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2699 tg3_flag(tp_peer, ENABLE_ASF))
2704 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2705 tg3_flag(tp, ENABLE_ASF))
2709 tg3_pwrsrc_switch_to_vaux(tp);
2711 tg3_pwrsrc_die_with_vmain(tp);
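/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed, based on the LED mode and the installed PHY.
 */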
2714 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2716 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2718 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2719 if (speed != SPEED_10)
2721 } else if (speed == SPEED_10)
2727 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2731 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2733 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2734 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2737 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2738 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2739 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2746 val = tr32(GRC_MISC_CFG);
2747 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2750 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2752 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2755 tg3_writephy(tp, MII_ADVERTISE, 0);
2756 tg3_writephy(tp, MII_BMCR,
2757 BMCR_ANENABLE | BMCR_ANRESTART);
2759 tg3_writephy(tp, MII_TG3_FET_TEST,
2760 phytest | MII_TG3_FET_SHADOW_EN);
2761 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2762 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2764 MII_TG3_FET_SHDW_AUXMODE4,
2767 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2770 } else if (do_low_power) {
2771 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2774 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2775 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2776 MII_TG3_AUXCTL_PCTL_VREG_11V;
2777 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2780 /* The PHY should not be powered down on some chips because
2783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2785 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2786 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2787 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2791 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2792 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2793 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2794 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2795 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2796 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2799 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2802 /* tp->lock is held. */
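/* Arbitrate for the NVRAM interface via the software arbitration register
 * so the driver does not collide with the bootcode; nvram_lock_cnt lets
 * lock/unlock calls nest.
 */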
2803 static int tg3_nvram_lock(struct tg3 *tp)
2805 if (tg3_flag(tp, NVRAM)) {
2808 if (tp->nvram_lock_cnt == 0) {
2809 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2810 for (i = 0; i < 8000; i++) {
2811 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2816 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2820 tp->nvram_lock_cnt++;
2825 /* tp->lock is held. */
2826 static void tg3_nvram_unlock(struct tg3 *tp)
2828 if (tg3_flag(tp, NVRAM)) {
2829 if (tp->nvram_lock_cnt > 0)
2830 tp->nvram_lock_cnt--;
2831 if (tp->nvram_lock_cnt == 0)
2832 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2836 /* tp->lock is held. */
2837 static void tg3_enable_nvram_access(struct tg3 *tp)
2839 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2840 u32 nvaccess = tr32(NVRAM_ACCESS);
2842 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2846 /* tp->lock is held. */
2847 static void tg3_disable_nvram_access(struct tg3 *tp)
2849 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2850 u32 nvaccess = tr32(NVRAM_ACCESS);
2852 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2856 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2857 u32 offset, u32 *val)
2862 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2865 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2866 EEPROM_ADDR_DEVID_MASK |
2868 tw32(GRC_EEPROM_ADDR,
2870 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2871 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2872 EEPROM_ADDR_ADDR_MASK) |
2873 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2875 for (i = 0; i < 1000; i++) {
2876 tmp = tr32(GRC_EEPROM_ADDR);
2878 if (tmp & EEPROM_ADDR_COMPLETE)
2882 if (!(tmp & EEPROM_ADDR_COMPLETE))
2885 tmp = tr32(GRC_EEPROM_DATA);
2888 * The data will always be opposite the native endian
2889 * format. Perform a blind byteswap to compensate.
2896 #define NVRAM_CMD_TIMEOUT 10000
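/* Maximum number of times tg3_nvram_exec_cmd() below polls NVRAM_CMD for
 * NVRAM_CMD_DONE before giving up.
 */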
2898 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2902 tw32(NVRAM_CMD, nvram_cmd);
2903 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2905 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2911 if (i == NVRAM_CMD_TIMEOUT)
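/* The two helpers below translate between flat byte offsets and the
 * page/byte-within-page addressing used by Atmel AT45DB0x1B parts
 * (page index above ATMEL_AT45DB0X1B_PAGE_POS, byte offset below it).
 */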
2917 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2919 if (tg3_flag(tp, NVRAM) &&
2920 tg3_flag(tp, NVRAM_BUFFERED) &&
2921 tg3_flag(tp, FLASH) &&
2922 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2923 (tp->nvram_jedecnum == JEDEC_ATMEL))
2925 addr = ((addr / tp->nvram_pagesize) <<
2926 ATMEL_AT45DB0X1B_PAGE_POS) +
2927 (addr % tp->nvram_pagesize);
2932 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2934 if (tg3_flag(tp, NVRAM) &&
2935 tg3_flag(tp, NVRAM_BUFFERED) &&
2936 tg3_flag(tp, FLASH) &&
2937 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2938 (tp->nvram_jedecnum == JEDEC_ATMEL))
2940 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2941 tp->nvram_pagesize) +
2942 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2947 /* NOTE: Data read in from NVRAM is byteswapped according to
2948 * the byteswapping settings for all other register accesses.
2949 * tg3 devices are BE devices, so on a BE machine, the data
2950 * returned will be exactly as it is seen in NVRAM. On a LE
2951 * machine, the 32-bit value will be byteswapped.
2953 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2957 if (!tg3_flag(tp, NVRAM))
2958 return tg3_nvram_read_using_eeprom(tp, offset, val);
2960 offset = tg3_nvram_phys_addr(tp, offset);
2962 if (offset > NVRAM_ADDR_MSK)
2965 ret = tg3_nvram_lock(tp);
2969 tg3_enable_nvram_access(tp);
2971 tw32(NVRAM_ADDR, offset);
2972 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2973 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2976 *val = tr32(NVRAM_RDDATA);
2978 tg3_disable_nvram_access(tp);
2980 tg3_nvram_unlock(tp);
2985 /* Ensures NVRAM data is in bytestream format. */
2986 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2989 int res = tg3_nvram_read(tp, offset, &v);
2991 *val = cpu_to_be32(v);
2995 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2996 u32 offset, u32 len, u8 *buf)
3001 for (i = 0; i < len; i += 4) {
3007 memcpy(&data, buf + i, 4);
3010 * The SEEPROM interface expects the data to always be opposite
3011 * the native endian format. We accomplish this by reversing
3012 * all the operations that would have been performed on the
3013 * data from a call to tg3_nvram_read_be32().
3015 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3017 val = tr32(GRC_EEPROM_ADDR);
3018 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3020 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3022 tw32(GRC_EEPROM_ADDR, val |
3023 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3024 (addr & EEPROM_ADDR_ADDR_MASK) |
3028 for (j = 0; j < 1000; j++) {
3029 val = tr32(GRC_EEPROM_ADDR);
3031 if (val & EEPROM_ADDR_COMPLETE)
3035 if (!(val & EEPROM_ADDR_COMPLETE)) {
3044 /* offset and length are dword aligned */
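/* Unbuffered flash parts are programmed a full page at a time: read back
 * the page containing the target range, merge in the new data, erase the
 * page, then rewrite it word by word.
 */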
3045 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3049 u32 pagesize = tp->nvram_pagesize;
3050 u32 pagemask = pagesize - 1;
3054 tmp = kmalloc(pagesize, GFP_KERNEL);
3060 u32 phy_addr, page_off, size;
3062 phy_addr = offset & ~pagemask;
3064 for (j = 0; j < pagesize; j += 4) {
3065 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3066 (__be32 *) (tmp + j));
3073 page_off = offset & pagemask;
3080 memcpy(tmp + page_off, buf, size);
3082 offset = offset + (pagesize - page_off);
3084 tg3_enable_nvram_access(tp);
3087 * Before we can erase the flash page, we need
3088 * to issue a special "write enable" command.
3090 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3092 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095 /* Erase the target page */
3096 tw32(NVRAM_ADDR, phy_addr);
3098 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3099 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3101 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3104 /* Issue another write enable to start the write. */
3105 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3107 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3110 for (j = 0; j < pagesize; j += 4) {
3113 data = *((__be32 *) (tmp + j));
3115 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3117 tw32(NVRAM_ADDR, phy_addr + j);
3119 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3123 nvram_cmd |= NVRAM_CMD_FIRST;
3124 else if (j == (pagesize - 4))
3125 nvram_cmd |= NVRAM_CMD_LAST;
3127 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3135 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3136 tg3_nvram_exec_cmd(tp, nvram_cmd);
3143 /* offset and length are dword aligned */
3144 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3149 for (i = 0; i < len; i += 4, offset += 4) {
3150 u32 page_off, phy_addr, nvram_cmd;
3153 memcpy(&data, buf + i, 4);
3154 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3156 page_off = offset % tp->nvram_pagesize;
3158 phy_addr = tg3_nvram_phys_addr(tp, offset);
3160 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3162 if (page_off == 0 || i == 0)
3163 nvram_cmd |= NVRAM_CMD_FIRST;
3164 if (page_off == (tp->nvram_pagesize - 4))
3165 nvram_cmd |= NVRAM_CMD_LAST;
3168 nvram_cmd |= NVRAM_CMD_LAST;
3170 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3171 !tg3_flag(tp, FLASH) ||
3172 !tg3_flag(tp, 57765_PLUS))
3173 tw32(NVRAM_ADDR, phy_addr);
3175 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3176 !tg3_flag(tp, 5755_PLUS) &&
3177 (tp->nvram_jedecnum == JEDEC_ST) &&
3178 (nvram_cmd & NVRAM_CMD_FIRST)) {
3181 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3182 ret = tg3_nvram_exec_cmd(tp, cmd);
3186 if (!tg3_flag(tp, FLASH)) {
3187 /* We always do complete word writes to eeprom. */
3188 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3191 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3198 /* offset and length are dword aligned */
3199 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3203 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3204 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3205 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3209 if (!tg3_flag(tp, NVRAM)) {
3210 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3214 ret = tg3_nvram_lock(tp);
3218 tg3_enable_nvram_access(tp);
3219 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3220 tw32(NVRAM_WRITE1, 0x406);
3222 grc_mode = tr32(GRC_MODE);
3223 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3225 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3226 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3229 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3233 grc_mode = tr32(GRC_MODE);
3234 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3236 tg3_disable_nvram_access(tp);
3237 tg3_nvram_unlock(tp);
3240 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3241 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3248 #define RX_CPU_SCRATCH_BASE 0x30000
3249 #define RX_CPU_SCRATCH_SIZE 0x04000
3250 #define TX_CPU_SCRATCH_BASE 0x34000
3251 #define TX_CPU_SCRATCH_SIZE 0x04000
3253 /* tp->lock is held. */
3254 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3258 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
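/* 5705_PLUS chips have no separate TX CPU, so asking to halt one is a
 * driver bug (hence the check above).
 */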
3260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3261 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3263 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3266 if (offset == RX_CPU_BASE) {
3267 for (i = 0; i < 10000; i++) {
3268 tw32(offset + CPU_STATE, 0xffffffff);
3269 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3270 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3274 tw32(offset + CPU_STATE, 0xffffffff);
3275 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3278 for (i = 0; i < 10000; i++) {
3279 tw32(offset + CPU_STATE, 0xffffffff);
3280 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3281 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3287 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3288 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3292 /* Clear firmware's nvram arbitration. */
3293 if (tg3_flag(tp, NVRAM))
3294 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3299 unsigned int fw_base;
3300 unsigned int fw_len;
3301 const __be32 *fw_data;
3304 /* tp->lock is held. */
3305 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3306 u32 cpu_scratch_base, int cpu_scratch_size,
3307 struct fw_info *info)
3309 int err, lock_err, i;
3310 void (*write_op)(struct tg3 *, u32, u32);
3312 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3314 "%s: Trying to load TX cpu firmware which is 5705\n",
3319 if (tg3_flag(tp, 5705_PLUS))
3320 write_op = tg3_write_mem;
3322 write_op = tg3_write_indirect_reg32;
3324 /* It is possible that bootcode is still loading at this point.
3325 * Get the nvram lock first before halting the cpu.
3327 lock_err = tg3_nvram_lock(tp);
3328 err = tg3_halt_cpu(tp, cpu_base);
3330 tg3_nvram_unlock(tp);
3334 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3335 write_op(tp, cpu_scratch_base + i, 0);
3336 tw32(cpu_base + CPU_STATE, 0xffffffff);
3337 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3338 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3339 write_op(tp, (cpu_scratch_base +
3340 (info->fw_base & 0xffff) +
3342 be32_to_cpu(info->fw_data[i]));
3350 /* tp->lock is held. */
3351 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3353 struct fw_info info;
3354 const __be32 *fw_data;
3357 fw_data = (void *)tp->fw->data;
3359 /* Firmware blob starts with version numbers, followed by
3360 start address and length. We are setting complete length.
3361 length = end_address_of_bss - start_address_of_text.
3362 Remainder is the blob to be loaded contiguously
3363 from start address. */
3365 info.fw_base = be32_to_cpu(fw_data[1]);
3366 info.fw_len = tp->fw->size - 12;
3367 info.fw_data = &fw_data[3];
3369 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3370 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3375 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3376 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3381 /* Now startup only the RX cpu. */
3382 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385 for (i = 0; i < 5; i++) {
3386 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3388 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3389 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3390 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3394 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3395 "should be %08x\n", __func__,
3396 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3399 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3400 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3405 /* tp->lock is held. */
3406 static int tg3_load_tso_firmware(struct tg3 *tp)
3408 struct fw_info info;
3409 const __be32 *fw_data;
3410 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3413 if (tg3_flag(tp, HW_TSO_1) ||
3414 tg3_flag(tp, HW_TSO_2) ||
3415 tg3_flag(tp, HW_TSO_3))
3418 fw_data = (void *)tp->fw->data;
3420 /* Firmware blob starts with version numbers, followed by
3421 start address and length. We are setting complete length.
3422 length = end_address_of_bss - start_address_of_text.
3423 Remainder is the blob to be loaded contiguously
3424 from start address. */
3426 info.fw_base = be32_to_cpu(fw_data[1]);
3427 cpu_scratch_size = tp->fw_len;
3428 info.fw_len = tp->fw->size - 12;
3429 info.fw_data = &fw_data[3];
3431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3432 cpu_base = RX_CPU_BASE;
3433 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3435 cpu_base = TX_CPU_BASE;
3436 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3437 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3440 err = tg3_load_firmware_cpu(tp, cpu_base,
3441 cpu_scratch_base, cpu_scratch_size,
3446 /* Now startup the cpu. */
3447 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448 tw32_f(cpu_base + CPU_PC, info.fw_base);
3450 for (i = 0; i < 5; i++) {
3451 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3453 tw32(cpu_base + CPU_STATE, 0xffffffff);
3454 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3455 tw32_f(cpu_base + CPU_PC, info.fw_base);
3460 "%s fails to set CPU PC, is %08x should be %08x\n",
3461 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3464 tw32(cpu_base + CPU_STATE, 0xffffffff);
3465 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3470 /* tp->lock is held. */
3471 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3473 u32 addr_high, addr_low;
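/* Pack the six station address bytes into the high (bytes 0-1) and low
 * (bytes 2-5) register halves expected by the MAC address slots.
 */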
3476 addr_high = ((tp->dev->dev_addr[0] << 8) |
3477 tp->dev->dev_addr[1]);
3478 addr_low = ((tp->dev->dev_addr[2] << 24) |
3479 (tp->dev->dev_addr[3] << 16) |
3480 (tp->dev->dev_addr[4] << 8) |
3481 (tp->dev->dev_addr[5] << 0));
3482 for (i = 0; i < 4; i++) {
3483 if (i == 1 && skip_mac_1)
3485 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3486 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3491 for (i = 0; i < 12; i++) {
3492 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3493 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
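/* Seed the transmit backoff generator with the sum of the station
 * address bytes, masked to TX_BACKOFF_SEED_MASK.
 */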
3497 addr_high = (tp->dev->dev_addr[0] +
3498 tp->dev->dev_addr[1] +
3499 tp->dev->dev_addr[2] +
3500 tp->dev->dev_addr[3] +
3501 tp->dev->dev_addr[4] +
3502 tp->dev->dev_addr[5]) &
3503 TX_BACKOFF_SEED_MASK;
3504 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3507 static void tg3_enable_register_access(struct tg3 *tp)
3510 * Make sure register accesses (indirect or otherwise) will function
3513 pci_write_config_dword(tp->pdev,
3514 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3517 static int tg3_power_up(struct tg3 *tp)
3521 tg3_enable_register_access(tp);
3523 err = pci_set_power_state(tp->pdev, PCI_D0);
3525 /* Switch out of Vaux if it is a NIC */
3526 tg3_pwrsrc_switch_to_vmain(tp);
3528 netdev_err(tp->dev, "Transition to D0 failed\n");
3534 static int tg3_setup_phy(struct tg3 *, int);
3536 static int tg3_power_down_prepare(struct tg3 *tp)
3539 bool device_should_wake, do_low_power;
3541 tg3_enable_register_access(tp);
3543 /* Restore the CLKREQ setting. */
3544 if (tg3_flag(tp, CLKREQ_BUG)) {
3547 pci_read_config_word(tp->pdev,
3548 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3550 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3551 pci_write_config_word(tp->pdev,
3552 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3556 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3557 tw32(TG3PCI_MISC_HOST_CTRL,
3558 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3560 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3561 tg3_flag(tp, WOL_ENABLE);
3563 if (tg3_flag(tp, USE_PHYLIB)) {
3564 do_low_power = false;
3565 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3566 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3567 struct phy_device *phydev;
3568 u32 phyid, advertising;
3570 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3572 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3574 tp->link_config.speed = phydev->speed;
3575 tp->link_config.duplex = phydev->duplex;
3576 tp->link_config.autoneg = phydev->autoneg;
3577 tp->link_config.advertising = phydev->advertising;
3579 advertising = ADVERTISED_TP |
3581 ADVERTISED_Autoneg |
3582 ADVERTISED_10baseT_Half;
3584 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3585 if (tg3_flag(tp, WOL_SPEED_100MB))
3587 ADVERTISED_100baseT_Half |
3588 ADVERTISED_100baseT_Full |
3589 ADVERTISED_10baseT_Full;
3591 advertising |= ADVERTISED_10baseT_Full;
3594 phydev->advertising = advertising;
3596 phy_start_aneg(phydev);
3598 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3599 if (phyid != PHY_ID_BCMAC131) {
3600 phyid &= PHY_BCM_OUI_MASK;
3601 if (phyid == PHY_BCM_OUI_1 ||
3602 phyid == PHY_BCM_OUI_2 ||
3603 phyid == PHY_BCM_OUI_3)
3604 do_low_power = true;
3608 do_low_power = true;
3610 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3611 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3613 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3614 tg3_setup_phy(tp, 0);
3617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3620 val = tr32(GRC_VCPU_EXT_CTRL);
3621 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3622 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3626 for (i = 0; i < 200; i++) {
3627 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3628 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3633 if (tg3_flag(tp, WOL_CAP))
3634 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3635 WOL_DRV_STATE_SHUTDOWN |
3639 if (device_should_wake) {
3642 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3644 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3645 tg3_phy_auxctl_write(tp,
3646 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3647 MII_TG3_AUXCTL_PCTL_WOL_EN |
3648 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3649 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3653 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3654 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656 mac_mode = MAC_MODE_PORT_MODE_MII;
3658 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3659 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3662 SPEED_100 : SPEED_10;
3663 if (tg3_5700_link_polarity(tp, speed))
3664 mac_mode |= MAC_MODE_LINK_POLARITY;
3666 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3669 mac_mode = MAC_MODE_PORT_MODE_TBI;
3672 if (!tg3_flag(tp, 5750_PLUS))
3673 tw32(MAC_LED_CTRL, tp->led_ctrl);
3675 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3676 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3677 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3678 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3680 if (tg3_flag(tp, ENABLE_APE))
3681 mac_mode |= MAC_MODE_APE_TX_EN |
3682 MAC_MODE_APE_RX_EN |
3683 MAC_MODE_TDE_ENABLE;
3685 tw32_f(MAC_MODE, mac_mode);
3688 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3692 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3693 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3697 base_val = tp->pci_clock_ctrl;
3698 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3699 CLOCK_CTRL_TXCLK_DISABLE);
3701 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3702 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3703 } else if (tg3_flag(tp, 5780_CLASS) ||
3704 tg3_flag(tp, CPMU_PRESENT) ||
3705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3707 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3708 u32 newbits1, newbits2;
3710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3712 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3713 CLOCK_CTRL_TXCLK_DISABLE |
3715 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716 } else if (tg3_flag(tp, 5705_PLUS)) {
3717 newbits1 = CLOCK_CTRL_625_CORE;
3718 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720 newbits1 = CLOCK_CTRL_ALTCLK;
3721 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3724 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3727 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3730 if (!tg3_flag(tp, 5705_PLUS)) {
3733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3734 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3735 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3736 CLOCK_CTRL_TXCLK_DISABLE |
3737 CLOCK_CTRL_44MHZ_CORE);
3739 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3742 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3743 tp->pci_clock_ctrl | newbits3, 40);
3747 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3748 tg3_power_down_phy(tp, do_low_power);
3750 tg3_frob_aux_power(tp, true);
3752 /* Workaround for unstable PLL clock */
3753 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3754 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3755 u32 val = tr32(0x7d00);
3757 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759 if (!tg3_flag(tp, ENABLE_ASF)) {
3762 err = tg3_nvram_lock(tp);
3763 tg3_halt_cpu(tp, RX_CPU_BASE);
3765 tg3_nvram_unlock(tp);
3769 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3774 static void tg3_power_down(struct tg3 *tp)
3776 tg3_power_down_prepare(tp);
3778 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3779 pci_set_power_state(tp->pdev, PCI_D3hot);
3782 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3784 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3785 case MII_TG3_AUX_STAT_10HALF:
3787 *duplex = DUPLEX_HALF;
3790 case MII_TG3_AUX_STAT_10FULL:
3792 *duplex = DUPLEX_FULL;
3795 case MII_TG3_AUX_STAT_100HALF:
3797 *duplex = DUPLEX_HALF;
3800 case MII_TG3_AUX_STAT_100FULL:
3802 *duplex = DUPLEX_FULL;
3805 case MII_TG3_AUX_STAT_1000HALF:
3806 *speed = SPEED_1000;
3807 *duplex = DUPLEX_HALF;
3810 case MII_TG3_AUX_STAT_1000FULL:
3811 *speed = SPEED_1000;
3812 *duplex = DUPLEX_FULL;
3816 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3817 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3823 *speed = SPEED_UNKNOWN;
3824 *duplex = DUPLEX_UNKNOWN;
3829 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3834 new_adv = ADVERTISE_CSMA;
3835 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3836 new_adv |= mii_advertise_flowctrl(flowctrl);
3838 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3842 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3843 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3845 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3846 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3847 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3849 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3854 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3857 tw32(TG3_CPMU_EEE_MODE,
3858 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3860 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3865 /* Advertise 100-BaseTX EEE ability */
3866 if (advertise & ADVERTISED_100baseT_Full)
3867 val |= MDIO_AN_EEE_ADV_100TX;
3868 /* Advertise 1000-BaseT EEE ability */
3869 if (advertise & ADVERTISED_1000baseT_Full)
3870 val |= MDIO_AN_EEE_ADV_1000T;
3871 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3875 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877 case ASIC_REV_57765:
3878 case ASIC_REV_57766:
3880 /* If we advertised any eee advertisements above... */
3882 val = MII_TG3_DSP_TAP26_ALNOKO |
3883 MII_TG3_DSP_TAP26_RMRXSTO |
3884 MII_TG3_DSP_TAP26_OPCSINPT;
3885 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3888 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3889 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3890 MII_TG3_DSP_CH34TP2_HIBW01);
3893 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3902 static void tg3_phy_copper_begin(struct tg3 *tp)
3904 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3905 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909 adv = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full;
3911 if (tg3_flag(tp, WOL_SPEED_100MB))
3912 adv |= ADVERTISED_100baseT_Half |
3913 ADVERTISED_100baseT_Full;
3915 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3917 adv = tp->link_config.advertising;
3918 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919 adv &= ~(ADVERTISED_1000baseT_Half |
3920 ADVERTISED_1000baseT_Full);
3922 fc = tp->link_config.flowctrl;
3925 tg3_phy_autoneg_cfg(tp, adv, fc);
3927 tg3_writephy(tp, MII_BMCR,
3928 BMCR_ANENABLE | BMCR_ANRESTART);
3931 u32 bmcr, orig_bmcr;
3933 tp->link_config.active_speed = tp->link_config.speed;
3934 tp->link_config.active_duplex = tp->link_config.duplex;
3937 switch (tp->link_config.speed) {
3943 bmcr |= BMCR_SPEED100;
3947 bmcr |= BMCR_SPEED1000;
3951 if (tp->link_config.duplex == DUPLEX_FULL)
3952 bmcr |= BMCR_FULLDPLX;
3954 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3955 (bmcr != orig_bmcr)) {
3956 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3957 for (i = 0; i < 1500; i++) {
3961 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3962 tg3_readphy(tp, MII_BMSR, &tmp))
3964 if (!(tmp & BMSR_LSTATUS)) {
3969 tg3_writephy(tp, MII_BMCR, bmcr);
3975 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3979 /* Turn off tap power management. */
3980 /* Set Extended packet length bit */
3981 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3983 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3984 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3985 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3986 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3987 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3994 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3996 u32 advmsk, tgtadv, advertising;
3998 advertising = tp->link_config.advertising;
3999 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4001 advmsk = ADVERTISE_ALL;
4002 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4003 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4004 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4007 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4010 if ((*lcladv & advmsk) != tgtadv)
4013 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4016 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4018 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4022 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4023 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4024 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4025 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4028 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4031 if (tg3_ctrl != tgtadv)
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4042 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4045 if (tg3_readphy(tp, MII_STAT1000, &val))
4048 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4051 if (tg3_readphy(tp, MII_LPA, rmtadv))
4054 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055 tp->link_config.rmt_adv = lpeth;
4060 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4062 int current_link_up;
4064 u32 lcl_adv, rmt_adv;
4072 (MAC_STATUS_SYNC_CHANGED |
4073 MAC_STATUS_CFG_CHANGED |
4074 MAC_STATUS_MI_COMPLETION |
4075 MAC_STATUS_LNKSTATE_CHANGED));
4078 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4080 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4084 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4086 /* Some third-party PHYs need to be reset on link going
4089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4092 netif_carrier_ok(tp->dev)) {
4093 tg3_readphy(tp, MII_BMSR, &bmsr);
4094 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4095 !(bmsr & BMSR_LSTATUS))
4101 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4102 tg3_readphy(tp, MII_BMSR, &bmsr);
4103 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4104 !tg3_flag(tp, INIT_COMPLETE))
4107 if (!(bmsr & BMSR_LSTATUS)) {
4108 err = tg3_init_5401phy_dsp(tp);
4112 tg3_readphy(tp, MII_BMSR, &bmsr);
4113 for (i = 0; i < 1000; i++) {
4115 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4116 (bmsr & BMSR_LSTATUS)) {
4122 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4123 TG3_PHY_REV_BCM5401_B0 &&
4124 !(bmsr & BMSR_LSTATUS) &&
4125 tp->link_config.active_speed == SPEED_1000) {
4126 err = tg3_phy_reset(tp);
4128 err = tg3_init_5401phy_dsp(tp);
4133 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4134 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4135 /* 5701 {A0,B0} CRC bug workaround */
4136 tg3_writephy(tp, 0x15, 0x0a75);
4137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4139 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4142 /* Clear pending interrupts... */
4143 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4144 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4148 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4149 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4153 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4154 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4155 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4157 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4160 current_link_up = 0;
4161 current_speed = SPEED_UNKNOWN;
4162 current_duplex = DUPLEX_UNKNOWN;
4163 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4164 tp->link_config.rmt_adv = 0;
4166 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4167 err = tg3_phy_auxctl_read(tp,
4168 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4170 if (!err && !(val & (1 << 10))) {
4171 tg3_phy_auxctl_write(tp,
4172 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4179 for (i = 0; i < 100; i++) {
4180 tg3_readphy(tp, MII_BMSR, &bmsr);
4181 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4182 (bmsr & BMSR_LSTATUS))
4187 if (bmsr & BMSR_LSTATUS) {
4190 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4191 for (i = 0; i < 2000; i++) {
4193 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4198 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4203 for (i = 0; i < 200; i++) {
4204 tg3_readphy(tp, MII_BMCR, &bmcr);
4205 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4207 if (bmcr && bmcr != 0x7fff)
4215 tp->link_config.active_speed = current_speed;
4216 tp->link_config.active_duplex = current_duplex;
4218 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4219 if ((bmcr & BMCR_ANENABLE) &&
4220 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4221 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4222 current_link_up = 1;
4224 if (!(bmcr & BMCR_ANENABLE) &&
4225 tp->link_config.speed == current_speed &&
4226 tp->link_config.duplex == current_duplex &&
4227 tp->link_config.flowctrl ==
4228 tp->link_config.active_flowctrl) {
4229 current_link_up = 1;
4233 if (current_link_up == 1 &&
4234 tp->link_config.active_duplex == DUPLEX_FULL) {
4237 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4238 reg = MII_TG3_FET_GEN_STAT;
4239 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4241 reg = MII_TG3_EXT_STAT;
4242 bit = MII_TG3_EXT_STAT_MDIX;
4245 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4246 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4248 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4253 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4254 tg3_phy_copper_begin(tp);
4256 tg3_readphy(tp, MII_BMSR, &bmsr);
4257 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4258 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4259 current_link_up = 1;
4262 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4263 if (current_link_up == 1) {
4264 if (tp->link_config.active_speed == SPEED_100 ||
4265 tp->link_config.active_speed == SPEED_10)
4266 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4268 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4269 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4270 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4272 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4274 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4275 if (tp->link_config.active_duplex == DUPLEX_HALF)
4276 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4279 if (current_link_up == 1 &&
4280 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4281 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4283 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4286 /* ??? Without this setting Netgear GA302T PHY does not
4287 * ??? send/receive packets...
4289 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4290 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4291 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4292 tw32_f(MAC_MI_MODE, tp->mi_mode);
4296 tw32_f(MAC_MODE, tp->mac_mode);
4299 tg3_phy_eee_adjust(tp, current_link_up);
4301 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4302 /* Polled via timer. */
4303 tw32_f(MAC_EVENT, 0);
4305 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4310 current_link_up == 1 &&
4311 tp->link_config.active_speed == SPEED_1000 &&
4312 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4315 (MAC_STATUS_SYNC_CHANGED |
4316 MAC_STATUS_CFG_CHANGED));
4319 NIC_SRAM_FIRMWARE_MBOX,
4320 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4323 /* Prevent send BD corruption. */
4324 if (tg3_flag(tp, CLKREQ_BUG)) {
4325 u16 oldlnkctl, newlnkctl;
4327 pci_read_config_word(tp->pdev,
4328 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4330 if (tp->link_config.active_speed == SPEED_100 ||
4331 tp->link_config.active_speed == SPEED_10)
4332 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4334 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4335 if (newlnkctl != oldlnkctl)
4336 pci_write_config_word(tp->pdev,
4337 pci_pcie_cap(tp->pdev) +
4338 PCI_EXP_LNKCTL, newlnkctl);
4341 if (current_link_up != netif_carrier_ok(tp->dev)) {
4342 if (current_link_up)
4343 netif_carrier_on(tp->dev);
4345 netif_carrier_off(tp->dev);
4346 tg3_link_report(tp);
4352 struct tg3_fiber_aneginfo {
4354 #define ANEG_STATE_UNKNOWN 0
4355 #define ANEG_STATE_AN_ENABLE 1
4356 #define ANEG_STATE_RESTART_INIT 2
4357 #define ANEG_STATE_RESTART 3
4358 #define ANEG_STATE_DISABLE_LINK_OK 4
4359 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4360 #define ANEG_STATE_ABILITY_DETECT 6
4361 #define ANEG_STATE_ACK_DETECT_INIT 7
4362 #define ANEG_STATE_ACK_DETECT 8
4363 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4364 #define ANEG_STATE_COMPLETE_ACK 10
4365 #define ANEG_STATE_IDLE_DETECT_INIT 11
4366 #define ANEG_STATE_IDLE_DETECT 12
4367 #define ANEG_STATE_LINK_OK 13
4368 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4369 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4372 #define MR_AN_ENABLE 0x00000001
4373 #define MR_RESTART_AN 0x00000002
4374 #define MR_AN_COMPLETE 0x00000004
4375 #define MR_PAGE_RX 0x00000008
4376 #define MR_NP_LOADED 0x00000010
4377 #define MR_TOGGLE_TX 0x00000020
4378 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4379 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4380 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4381 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4382 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4383 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4384 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4385 #define MR_TOGGLE_RX 0x00002000
4386 #define MR_NP_RX 0x00004000
4388 #define MR_LINK_OK 0x80000000
4390 unsigned long link_time, cur_time;
4392 u32 ability_match_cfg;
4393 int ability_match_count;
4395 char ability_match, idle_match, ack_match;
4397 u32 txconfig, rxconfig;
4398 #define ANEG_CFG_NP 0x00000080
4399 #define ANEG_CFG_ACK 0x00000040
4400 #define ANEG_CFG_RF2 0x00000020
4401 #define ANEG_CFG_RF1 0x00000010
4402 #define ANEG_CFG_PS2 0x00000001
4403 #define ANEG_CFG_PS1 0x00008000
4404 #define ANEG_CFG_HD 0x00004000
4405 #define ANEG_CFG_FD 0x00002000
4406 #define ANEG_CFG_INVAL 0x00001f06
4411 #define ANEG_TIMER_ENAB 2
4412 #define ANEG_FAILED -1
4414 #define ANEG_STATE_SETTLE_TIME 10000
4416 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4417 struct tg3_fiber_aneginfo *ap)
4420 unsigned long delta;
4424 if (ap->state == ANEG_STATE_UNKNOWN) {
4428 ap->ability_match_cfg = 0;
4429 ap->ability_match_count = 0;
4430 ap->ability_match = 0;
4436 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4437 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4439 if (rx_cfg_reg != ap->ability_match_cfg) {
4440 ap->ability_match_cfg = rx_cfg_reg;
4441 ap->ability_match = 0;
4442 ap->ability_match_count = 0;
4444 if (++ap->ability_match_count > 1) {
4445 ap->ability_match = 1;
4446 ap->ability_match_cfg = rx_cfg_reg;
4449 if (rx_cfg_reg & ANEG_CFG_ACK)
4457 ap->ability_match_cfg = 0;
4458 ap->ability_match_count = 0;
4459 ap->ability_match = 0;
4465 ap->rxconfig = rx_cfg_reg;
4468 switch (ap->state) {
4469 case ANEG_STATE_UNKNOWN:
4470 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4471 ap->state = ANEG_STATE_AN_ENABLE;
4474 case ANEG_STATE_AN_ENABLE:
4475 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4476 if (ap->flags & MR_AN_ENABLE) {
4479 ap->ability_match_cfg = 0;
4480 ap->ability_match_count = 0;
4481 ap->ability_match = 0;
4485 ap->state = ANEG_STATE_RESTART_INIT;
4487 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4491 case ANEG_STATE_RESTART_INIT:
4492 ap->link_time = ap->cur_time;
4493 ap->flags &= ~(MR_NP_LOADED);
4495 tw32(MAC_TX_AUTO_NEG, 0);
4496 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4497 tw32_f(MAC_MODE, tp->mac_mode);
4500 ret = ANEG_TIMER_ENAB;
4501 ap->state = ANEG_STATE_RESTART;
4504 case ANEG_STATE_RESTART:
4505 delta = ap->cur_time - ap->link_time;
4506 if (delta > ANEG_STATE_SETTLE_TIME)
4507 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4509 ret = ANEG_TIMER_ENAB;
4512 case ANEG_STATE_DISABLE_LINK_OK:
4516 case ANEG_STATE_ABILITY_DETECT_INIT:
4517 ap->flags &= ~(MR_TOGGLE_TX);
4518 ap->txconfig = ANEG_CFG_FD;
4519 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4520 if (flowctrl & ADVERTISE_1000XPAUSE)
4521 ap->txconfig |= ANEG_CFG_PS1;
4522 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4523 ap->txconfig |= ANEG_CFG_PS2;
4524 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4525 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4526 tw32_f(MAC_MODE, tp->mac_mode);
4529 ap->state = ANEG_STATE_ABILITY_DETECT;
4532 case ANEG_STATE_ABILITY_DETECT:
4533 if (ap->ability_match != 0 && ap->rxconfig != 0)
4534 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4537 case ANEG_STATE_ACK_DETECT_INIT:
4538 ap->txconfig |= ANEG_CFG_ACK;
4539 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4540 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541 tw32_f(MAC_MODE, tp->mac_mode);
4544 ap->state = ANEG_STATE_ACK_DETECT;
4547 case ANEG_STATE_ACK_DETECT:
4548 if (ap->ack_match != 0) {
4549 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4550 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4551 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4553 ap->state = ANEG_STATE_AN_ENABLE;
4555 } else if (ap->ability_match != 0 &&
4556 ap->rxconfig == 0) {
4557 ap->state = ANEG_STATE_AN_ENABLE;
4561 case ANEG_STATE_COMPLETE_ACK_INIT:
4562 if (ap->rxconfig & ANEG_CFG_INVAL) {
4566 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4567 MR_LP_ADV_HALF_DUPLEX |
4568 MR_LP_ADV_SYM_PAUSE |
4569 MR_LP_ADV_ASYM_PAUSE |
4570 MR_LP_ADV_REMOTE_FAULT1 |
4571 MR_LP_ADV_REMOTE_FAULT2 |
4572 MR_LP_ADV_NEXT_PAGE |
4575 if (ap->rxconfig & ANEG_CFG_FD)
4576 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4577 if (ap->rxconfig & ANEG_CFG_HD)
4578 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4579 if (ap->rxconfig & ANEG_CFG_PS1)
4580 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4581 if (ap->rxconfig & ANEG_CFG_PS2)
4582 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4583 if (ap->rxconfig & ANEG_CFG_RF1)
4584 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4585 if (ap->rxconfig & ANEG_CFG_RF2)
4586 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4587 if (ap->rxconfig & ANEG_CFG_NP)
4588 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4590 ap->link_time = ap->cur_time;
4592 ap->flags ^= (MR_TOGGLE_TX);
4593 if (ap->rxconfig & 0x0008)
4594 ap->flags |= MR_TOGGLE_RX;
4595 if (ap->rxconfig & ANEG_CFG_NP)
4596 ap->flags |= MR_NP_RX;
4597 ap->flags |= MR_PAGE_RX;
4599 ap->state = ANEG_STATE_COMPLETE_ACK;
4600 ret = ANEG_TIMER_ENAB;
4603 case ANEG_STATE_COMPLETE_ACK:
4604 if (ap->ability_match != 0 &&
4605 ap->rxconfig == 0) {
4606 ap->state = ANEG_STATE_AN_ENABLE;
4609 delta = ap->cur_time - ap->link_time;
4610 if (delta > ANEG_STATE_SETTLE_TIME) {
4611 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4612 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4614 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4615 !(ap->flags & MR_NP_RX)) {
4616 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4624 case ANEG_STATE_IDLE_DETECT_INIT:
4625 ap->link_time = ap->cur_time;
4626 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4627 tw32_f(MAC_MODE, tp->mac_mode);
4630 ap->state = ANEG_STATE_IDLE_DETECT;
4631 ret = ANEG_TIMER_ENAB;
4634 case ANEG_STATE_IDLE_DETECT:
4635 if (ap->ability_match != 0 &&
4636 ap->rxconfig == 0) {
4637 ap->state = ANEG_STATE_AN_ENABLE;
4640 delta = ap->cur_time - ap->link_time;
4641 if (delta > ANEG_STATE_SETTLE_TIME) {
4642 /* XXX another gem from the Broadcom driver :( */
4643 ap->state = ANEG_STATE_LINK_OK;
4647 case ANEG_STATE_LINK_OK:
4648 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4652 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4653 /* ??? unimplemented */
4656 case ANEG_STATE_NEXT_PAGE_WAIT:
4657 /* ??? unimplemented */
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4671 struct tg3_fiber_aneginfo aninfo;
4672 int status = ANEG_FAILED;
4676 tw32_f(MAC_TX_AUTO_NEG, 0);
4678 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4682 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4685 memset(&aninfo, 0, sizeof(aninfo));
4686 aninfo.flags |= MR_AN_ENABLE;
4687 aninfo.state = ANEG_STATE_UNKNOWN;
4688 aninfo.cur_time = 0;
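/* Step the software autoneg state machine until it reports done or
 * failed, giving up after 195000 passes through the loop.
 */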
4690 while (++tick < 195000) {
4691 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692 if (status == ANEG_DONE || status == ANEG_FAILED)
4698 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699 tw32_f(MAC_MODE, tp->mac_mode);
4702 *txflags = aninfo.txconfig;
4703 *rxflags = aninfo.flags;
4705 if (status == ANEG_DONE &&
4706 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707 MR_LP_ADV_FULL_DUPLEX)))
4713 static void tg3_init_bcm8002(struct tg3 *tp)
4715 u32 mac_status = tr32(MAC_STATUS);
4718 /* Reset when initting first time or we have a link. */
4719 if (tg3_flag(tp, INIT_COMPLETE) &&
4720 !(mac_status & MAC_STATUS_PCS_SYNCED))
4723 /* Set PLL lock range. */
4724 tg3_writephy(tp, 0x16, 0x8007);
4727 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4729 /* Wait for reset to complete. */
4730 /* XXX schedule_timeout() ... */
4731 for (i = 0; i < 500; i++)
4734 /* Config mode; select PMA/Ch 1 regs. */
4735 tg3_writephy(tp, 0x10, 0x8411);
4737 /* Enable auto-lock and comdet, select txclk for tx. */
4738 tg3_writephy(tp, 0x11, 0x0a10);
4740 tg3_writephy(tp, 0x18, 0x00a0);
4741 tg3_writephy(tp, 0x16, 0x41ff);
4743 /* Assert and deassert POR. */
4744 tg3_writephy(tp, 0x13, 0x0400);
4746 tg3_writephy(tp, 0x13, 0x0000);
4748 tg3_writephy(tp, 0x11, 0x0a50);
4750 tg3_writephy(tp, 0x11, 0x0a10);
4752 /* Wait for signal to stabilize */
4753 /* XXX schedule_timeout() ... */
4754 for (i = 0; i < 15000; i++)
4757 /* Deselect the channel register so we can read the PHYID
4760 tg3_writephy(tp, 0x10, 0x8011);
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4766 u32 sg_dig_ctrl, sg_dig_status;
4767 u32 serdes_cfg, expected_sg_dig_ctrl;
4768 int workaround, port_a;
4769 int current_link_up;
4772 expected_sg_dig_ctrl = 0;
4775 current_link_up = 0;
4777 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4780 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4783 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784 /* preserve bits 20-23 for voltage regulator */
4785 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4788 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4790 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4793 u32 val = serdes_cfg;
4799 tw32_f(MAC_SERDES_CFG, val);
4802 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4804 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805 tg3_setup_flow_control(tp, 0, 0);
4806 current_link_up = 1;
4811 /* Want auto-negotiation. */
4812 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4814 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815 if (flowctrl & ADVERTISE_1000XPAUSE)
4816 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4820 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822 tp->serdes_counter &&
4823 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824 MAC_STATUS_RCVD_CFG)) ==
4825 MAC_STATUS_PCS_SYNCED)) {
4826 tp->serdes_counter--;
4827 current_link_up = 1;
4832 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4835 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4837 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840 MAC_STATUS_SIGNAL_DET)) {
4841 sg_dig_status = tr32(SG_DIG_STATUS);
4842 mac_status = tr32(MAC_STATUS);
4844 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846 u32 local_adv = 0, remote_adv = 0;
4848 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849 local_adv |= ADVERTISE_1000XPAUSE;
4850 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851 local_adv |= ADVERTISE_1000XPSE_ASYM;
4853 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854 remote_adv |= LPA_1000XPAUSE;
4855 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856 remote_adv |= LPA_1000XPAUSE_ASYM;
4858 tp->link_config.rmt_adv =
4859 mii_adv_to_ethtool_adv_x(remote_adv);
4861 tg3_setup_flow_control(tp, local_adv, remote_adv);
4862 current_link_up = 1;
4863 tp->serdes_counter = 0;
4864 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866 if (tp->serdes_counter)
4867 tp->serdes_counter--;
4870 u32 val = serdes_cfg;
4877 tw32_f(MAC_SERDES_CFG, val);
4880 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4883 /* Link parallel detection - link is up */
4884 /* only if we have PCS_SYNC and not */
4885 /* receiving config code words */
4886 mac_status = tr32(MAC_STATUS);
4887 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889 tg3_setup_flow_control(tp, 0, 0);
4890 current_link_up = 1;
tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
tp->serdes_counter = SERDES_PARALLEL_DET_TIMEOUT;
} else
4896 goto restart_autoneg;
4900 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4905 return current_link_up;
4908 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4910 int current_link_up = 0;
4912 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4915 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 u32 txflags, rxflags;
4919 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4920 u32 local_adv = 0, remote_adv = 0;
4922 if (txflags & ANEG_CFG_PS1)
4923 local_adv |= ADVERTISE_1000XPAUSE;
4924 if (txflags & ANEG_CFG_PS2)
4925 local_adv |= ADVERTISE_1000XPSE_ASYM;
4927 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4928 remote_adv |= LPA_1000XPAUSE;
4929 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4930 remote_adv |= LPA_1000XPAUSE_ASYM;
4932 tp->link_config.rmt_adv =
4933 mii_adv_to_ethtool_adv_x(remote_adv);
4935 tg3_setup_flow_control(tp, local_adv, remote_adv);
4937 current_link_up = 1;
4939 for (i = 0; i < 30; i++) {
tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
		    MAC_STATUS_CFG_CHANGED));
4945 if ((tr32(MAC_STATUS) &
4946 (MAC_STATUS_SYNC_CHANGED |
4947 MAC_STATUS_CFG_CHANGED)) == 0)
4951 mac_status = tr32(MAC_STATUS);
4952 if (current_link_up == 0 &&
4953 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4954 !(mac_status & MAC_STATUS_RCVD_CFG))
4955 current_link_up = 1;
4957 tg3_setup_flow_control(tp, 0, 0);
4959 /* Forcing 1000FD link up. */
4960 current_link_up = 1;
4962 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4965 tw32_f(MAC_MODE, tp->mac_mode);
4970 return current_link_up;
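/* Top-level link setup for TBI/fiber ports: program the MAC for TBI
 * mode, run either the hardware (SG_DIG) or the software fiber
 * autoneg path, then update carrier state, LEDs and flow control
 * reporting accordingly.
 */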
4973 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
u32 orig_pause_cfg;
u16 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
int current_link_up;
int i;
4982 orig_pause_cfg = tp->link_config.active_flowctrl;
4983 orig_active_speed = tp->link_config.active_speed;
4984 orig_active_duplex = tp->link_config.active_duplex;
4986 if (!tg3_flag(tp, HW_AUTONEG) &&
4987 netif_carrier_ok(tp->dev) &&
4988 tg3_flag(tp, INIT_COMPLETE)) {
4989 mac_status = tr32(MAC_STATUS);
4990 mac_status &= (MAC_STATUS_PCS_SYNCED |
4991 MAC_STATUS_SIGNAL_DET |
4992 MAC_STATUS_CFG_CHANGED |
4993 MAC_STATUS_RCVD_CFG);
4994 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4995 MAC_STATUS_SIGNAL_DET)) {
4996 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4997 MAC_STATUS_CFG_CHANGED));
5002 tw32_f(MAC_TX_AUTO_NEG, 0);
5004 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5005 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5006 tw32_f(MAC_MODE, tp->mac_mode);
5009 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5010 tg3_init_bcm8002(tp);
5012 /* Enable link change event even when serdes polling. */
5013 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5016 current_link_up = 0;
5017 tp->link_config.rmt_adv = 0;
5018 mac_status = tr32(MAC_STATUS);
5020 if (tg3_flag(tp, HW_AUTONEG))
5021 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5023 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5025 tp->napi[0].hw_status->status =
5026 (SD_STATUS_UPDATED |
5027 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5029 for (i = 0; i < 100; i++) {
5030 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5031 MAC_STATUS_CFG_CHANGED));
5033 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5034 MAC_STATUS_CFG_CHANGED |
5035 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5039 mac_status = tr32(MAC_STATUS);
5040 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5041 current_link_up = 0;
5042 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5043 tp->serdes_counter == 0) {
5044 tw32_f(MAC_MODE, (tp->mac_mode |
5045 MAC_MODE_SEND_CONFIGS));
5047 tw32_f(MAC_MODE, tp->mac_mode);
5051 if (current_link_up == 1) {
5052 tp->link_config.active_speed = SPEED_1000;
5053 tp->link_config.active_duplex = DUPLEX_FULL;
5054 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055 LED_CTRL_LNKLED_OVERRIDE |
5056 LED_CTRL_1000MBPS_ON));
5058 tp->link_config.active_speed = SPEED_UNKNOWN;
5059 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5060 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5061 LED_CTRL_LNKLED_OVERRIDE |
5062 LED_CTRL_TRAFFIC_OVERRIDE));
5065 if (current_link_up != netif_carrier_ok(tp->dev)) {
5066 if (current_link_up)
5067 netif_carrier_on(tp->dev);
5069 netif_carrier_off(tp->dev);
5070 tg3_link_report(tp);
5072 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5073 if (orig_pause_cfg != now_pause_cfg ||
5074 orig_active_speed != tp->link_config.active_speed ||
5075 orig_active_duplex != tp->link_config.active_duplex)
5076 tg3_link_report(tp);
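/* Link setup for SERDES devices that are managed through MII
 * registers (e.g. the 5714/5780 class). Analogous to the copper
 * path: read BMSR/BMCR, advertise or force a mode, and derive
 * speed, duplex and flow control from the result.
 */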
5082 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
int current_link_up, err = 0;
u32 bmsr, bmcr;
u16 current_speed;
u8 current_duplex;
u32 local_adv, remote_adv;
5090 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5091 tw32_f(MAC_MODE, tp->mac_mode);
tw32_f(MAC_STATUS,
       (MAC_STATUS_SYNC_CHANGED |
	MAC_STATUS_CFG_CHANGED |
	MAC_STATUS_MI_COMPLETION |
	MAC_STATUS_LNKSTATE_CHANGED));
5106 current_link_up = 0;
5107 current_speed = SPEED_UNKNOWN;
5108 current_duplex = DUPLEX_UNKNOWN;
5109 tp->link_config.rmt_adv = 0;
5111 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5112 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5114 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5115 bmsr |= BMSR_LSTATUS;
5117 bmsr &= ~BMSR_LSTATUS;
5120 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5122 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5123 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5124 /* do nothing, just check for link up at the end */
5125 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5128 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5129 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5130 ADVERTISE_1000XPAUSE |
5131 ADVERTISE_1000XPSE_ASYM |
5134 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5135 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5137 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5138 tg3_writephy(tp, MII_ADVERTISE, newadv);
5139 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5140 tg3_writephy(tp, MII_BMCR, bmcr);
5142 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5143 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5144 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5151 bmcr &= ~BMCR_SPEED1000;
5152 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5154 if (tp->link_config.duplex == DUPLEX_FULL)
5155 new_bmcr |= BMCR_FULLDPLX;
5157 if (new_bmcr != bmcr) {
/* BMCR_SPEED1000 is a reserved bit that needs
 * to be set on write.
 */
5161 new_bmcr |= BMCR_SPEED1000;
5163 /* Force a linkdown */
5164 if (netif_carrier_ok(tp->dev)) {
5167 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5168 adv &= ~(ADVERTISE_1000XFULL |
5169 ADVERTISE_1000XHALF |
5171 tg3_writephy(tp, MII_ADVERTISE, adv);
5172 tg3_writephy(tp, MII_BMCR, bmcr |
5176 netif_carrier_off(tp->dev);
5178 tg3_writephy(tp, MII_BMCR, new_bmcr);
5180 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5181 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5184 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5185 bmsr |= BMSR_LSTATUS;
5187 bmsr &= ~BMSR_LSTATUS;
5189 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5193 if (bmsr & BMSR_LSTATUS) {
5194 current_speed = SPEED_1000;
5195 current_link_up = 1;
5196 if (bmcr & BMCR_FULLDPLX)
5197 current_duplex = DUPLEX_FULL;
5199 current_duplex = DUPLEX_HALF;
5204 if (bmcr & BMCR_ANENABLE) {
5207 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5208 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5209 common = local_adv & remote_adv;
5210 if (common & (ADVERTISE_1000XHALF |
5211 ADVERTISE_1000XFULL)) {
5212 if (common & ADVERTISE_1000XFULL)
5213 current_duplex = DUPLEX_FULL;
5215 current_duplex = DUPLEX_HALF;
5217 tp->link_config.rmt_adv =
5218 mii_adv_to_ethtool_adv_x(remote_adv);
5219 } else if (!tg3_flag(tp, 5780_CLASS)) {
5220 /* Link is up via parallel detect */
5222 current_link_up = 0;
5227 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5228 tg3_setup_flow_control(tp, local_adv, remote_adv);
5230 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5231 if (tp->link_config.active_duplex == DUPLEX_HALF)
5232 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5234 tw32_f(MAC_MODE, tp->mac_mode);
5237 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239 tp->link_config.active_speed = current_speed;
5240 tp->link_config.active_duplex = current_duplex;
5242 if (current_link_up != netif_carrier_ok(tp->dev)) {
5243 if (current_link_up)
5244 netif_carrier_on(tp->dev);
5246 netif_carrier_off(tp->dev);
5247 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5249 tg3_link_report(tp);
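/* Periodic helper for MII SERDES ports: once the autoneg grace period
 * (tp->serdes_counter) expires, fall back to parallel detection when
 * signal is present but no config code words are being received, and
 * switch back to autoneg as soon as config code words reappear.
 */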
5254 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5256 if (tp->serdes_counter) {
5257 /* Give autoneg time to complete. */
5258 tp->serdes_counter--;
5262 if (!netif_carrier_ok(tp->dev) &&
5263 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5266 tg3_readphy(tp, MII_BMCR, &bmcr);
5267 if (bmcr & BMCR_ANENABLE) {
5270 /* Select shadow register 0x1f */
5271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5272 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5274 /* Select expansion interrupt status register */
5275 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5276 MII_TG3_DSP_EXP1_INT_STAT);
5277 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5278 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5280 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
/* We have signal detect and not receiving
 * config code words, link is up by parallel
 * detection.
 */
5286 bmcr &= ~BMCR_ANENABLE;
5287 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5288 tg3_writephy(tp, MII_BMCR, bmcr);
5289 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5292 } else if (netif_carrier_ok(tp->dev) &&
5293 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5294 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5297 /* Select expansion interrupt status register */
5298 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5299 MII_TG3_DSP_EXP1_INT_STAT);
5300 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5304 /* Config code words received, turn on autoneg. */
5305 tg3_readphy(tp, MII_BMCR, &bmcr);
5306 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5308 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5314 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5319 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5320 err = tg3_setup_fiber_phy(tp, force_reset);
5321 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5322 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5324 err = tg3_setup_copper_phy(tp, force_reset);
5326 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5329 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5330 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5332 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5337 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5338 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5339 tw32(GRC_MISC_CFG, val);
5342 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5343 (6 << TX_LENGTHS_IPG_SHIFT);
5344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5345 val |= tr32(MAC_TX_LENGTHS) &
5346 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5347 TX_LENGTHS_CNT_DWN_VAL_MSK);
5349 if (tp->link_config.active_speed == SPEED_1000 &&
5350 tp->link_config.active_duplex == DUPLEX_HALF)
5351 tw32(MAC_TX_LENGTHS, val |
5352 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5354 tw32(MAC_TX_LENGTHS, val |
5355 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5357 if (!tg3_flag(tp, 5705_PLUS)) {
5358 if (netif_carrier_ok(tp->dev)) {
5359 tw32(HOSTCC_STAT_COAL_TICKS,
5360 tp->coal.stats_block_coalesce_usecs);
5362 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5366 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5367 val = tr32(PCIE_PWR_MGMT_THRESH);
5368 if (!netif_carrier_ok(tp->dev))
5369 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5372 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5373 tw32(PCIE_PWR_MGMT_THRESH, val);
5379 static inline int tg3_irq_sync(struct tg3 *tp)
5381 return tp->irq_sync;
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5388 dst = (u32 *)((u8 *)dst + off);
5389 for (i = 0; i < len; i += sizeof(u32))
5390 *dst++ = tr32(off + i);
5393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5395 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5396 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5397 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5398 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5399 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5400 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5401 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5402 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5403 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5404 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5405 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5406 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5407 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5408 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5409 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5410 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5411 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5412 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5413 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5415 if (tg3_flag(tp, SUPPORT_MSIX))
5416 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5418 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5419 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5420 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5421 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5422 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5423 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5424 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5425 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5427 if (!tg3_flag(tp, 5705_PLUS)) {
5428 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5429 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5430 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5433 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5434 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5435 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5436 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5437 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5439 if (tg3_flag(tp, NVRAM))
5440 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
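/* Dump a snapshot of chip registers and per-vector status block and
 * NAPI state to the kernel log. Intended for debugging; the register
 * buffer is allocated with GFP_ATOMIC so this is safe to call from
 * atomic context.
 */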
5443 static void tg3_dump_state(struct tg3 *tp)
5448 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5450 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5454 if (tg3_flag(tp, PCI_EXPRESS)) {
5455 /* Read up to but not including private PCI registers */
5456 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5457 regs[i / sizeof(u32)] = tr32(i);
5459 tg3_dump_legacy_regs(tp, regs);
5461 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5462 if (!regs[i + 0] && !regs[i + 1] &&
5463 !regs[i + 2] && !regs[i + 3])
5466 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5468 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5473 for (i = 0; i < tp->irq_cnt; i++) {
5474 struct tg3_napi *tnapi = &tp->napi[i];
5476 /* SW status block */
netdev_err(tp->dev,
	   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5480 tnapi->hw_status->status,
5481 tnapi->hw_status->status_tag,
5482 tnapi->hw_status->rx_jumbo_consumer,
5483 tnapi->hw_status->rx_consumer,
5484 tnapi->hw_status->rx_mini_consumer,
5485 tnapi->hw_status->idx[0].rx_producer,
5486 tnapi->hw_status->idx[0].tx_consumer);
netdev_err(tp->dev,
	   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5491 tnapi->last_tag, tnapi->last_irq_tag,
5492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5494 tnapi->prodring.rx_std_prod_idx,
5495 tnapi->prodring.rx_std_cons_idx,
5496 tnapi->prodring.rx_jmb_prod_idx,
5497 tnapi->prodring.rx_jmb_cons_idx);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * to fix the problem.
 */
5507 static void tg3_tx_recover(struct tg3 *tp)
5509 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5512 netdev_warn(tp->dev,
5513 "The system may be re-ordering memory-mapped I/O "
5514 "cycles to the network device, attempting to recover. "
5515 "Please report the problem to the driver maintainer "
5516 "and include system chipset information.\n");
5518 spin_lock(&tp->lock);
5519 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520 spin_unlock(&tp->lock);
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
/* Tell compiler to fetch tx indices from memory. */
barrier();
5527 return tnapi->tx_pending -
5528 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
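/* The expression above computes free TX descriptors with unsigned
 * wrap-around arithmetic. For example, assuming tx_pending = 511 and
 * TG3_TX_RING_SIZE = 512: with tx_prod = 5 and tx_cons = 510, the
 * masked difference (5 - 510) & 511 = 7 entries are still in flight,
 * so 511 - 7 = 504 descriptors remain available.
 */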
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
5535 static void tg3_tx(struct tg3_napi *tnapi)
5537 struct tg3 *tp = tnapi->tp;
5538 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5539 u32 sw_idx = tnapi->tx_cons;
5540 struct netdev_queue *txq;
5541 int index = tnapi - tp->napi;
5542 unsigned int pkts_compl = 0, bytes_compl = 0;
5544 if (tg3_flag(tp, ENABLE_TSS))
5547 txq = netdev_get_tx_queue(tp->dev, index);
5549 while (sw_idx != hw_idx) {
5550 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5551 struct sk_buff *skb = ri->skb;
5554 if (unlikely(skb == NULL)) {
5559 pci_unmap_single(tp->pdev,
5560 dma_unmap_addr(ri, mapping),
5566 while (ri->fragmented) {
5567 ri->fragmented = false;
5568 sw_idx = NEXT_TX(sw_idx);
5569 ri = &tnapi->tx_buffers[sw_idx];
5572 sw_idx = NEXT_TX(sw_idx);
5574 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5575 ri = &tnapi->tx_buffers[sw_idx];
5576 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5579 pci_unmap_page(tp->pdev,
5580 dma_unmap_addr(ri, mapping),
5581 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5584 while (ri->fragmented) {
5585 ri->fragmented = false;
5586 sw_idx = NEXT_TX(sw_idx);
5587 ri = &tnapi->tx_buffers[sw_idx];
5590 sw_idx = NEXT_TX(sw_idx);
5594 bytes_compl += skb->len;
5598 if (unlikely(tx_bug)) {
5604 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5606 tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
 * before checking for netif_queue_stopped(). Without the
 * memory barrier, there is a small possibility that tg3_start_xmit()
 * will miss it and cause the queue to be stopped forever.
 */
smp_mb();
5615 if (unlikely(netif_tx_queue_stopped(txq) &&
5616 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5617 __netif_tx_lock(txq, smp_processor_id());
5618 if (netif_tx_queue_stopped(txq) &&
5619 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5620 netif_tx_wake_queue(txq);
5621 __netif_tx_unlock(txq);
5625 static void tg3_frag_free(bool is_frag, void *data)
5628 put_page(virt_to_head_page(data));
5633 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5635 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5636 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5641 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5642 map_sz, PCI_DMA_FROMDEVICE);
5643 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
5659 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5660 u32 opaque_key, u32 dest_idx_unmasked,
5661 unsigned int *frag_size)
5663 struct tg3_rx_buffer_desc *desc;
5664 struct ring_info *map;
5667 int skb_size, data_size, dest_idx;
5669 switch (opaque_key) {
5670 case RXD_OPAQUE_RING_STD:
5671 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5672 desc = &tpr->rx_std[dest_idx];
5673 map = &tpr->rx_std_buffers[dest_idx];
5674 data_size = tp->rx_pkt_map_sz;
5677 case RXD_OPAQUE_RING_JUMBO:
5678 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5679 desc = &tpr->rx_jmb[dest_idx].std;
5680 map = &tpr->rx_jmb_buffers[dest_idx];
5681 data_size = TG3_RX_JMB_MAP_SZ;
/* Do not overwrite any of the map or rp information
 * until we are sure we can commit to a new buffer.
 *
 * Callers depend upon this behavior and assume that
 * we leave everything unchanged if we fail.
 */
5694 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5695 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5696 if (skb_size <= PAGE_SIZE) {
5697 data = netdev_alloc_frag(skb_size);
5698 *frag_size = skb_size;
5700 data = kmalloc(skb_size, GFP_ATOMIC);
5706 mapping = pci_map_single(tp->pdev,
5707 data + TG3_RX_OFFSET(tp),
5709 PCI_DMA_FROMDEVICE);
5710 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5711 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5716 dma_unmap_addr_set(map, mapping, mapping);
5718 desc->addr_hi = ((u64)mapping >> 32);
5719 desc->addr_lo = ((u64)mapping & 0xffffffff);
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
5728 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5729 struct tg3_rx_prodring_set *dpr,
5730 u32 opaque_key, int src_idx,
5731 u32 dest_idx_unmasked)
5733 struct tg3 *tp = tnapi->tp;
5734 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5735 struct ring_info *src_map, *dest_map;
5736 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5739 switch (opaque_key) {
5740 case RXD_OPAQUE_RING_STD:
5741 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5742 dest_desc = &dpr->rx_std[dest_idx];
5743 dest_map = &dpr->rx_std_buffers[dest_idx];
5744 src_desc = &spr->rx_std[src_idx];
5745 src_map = &spr->rx_std_buffers[src_idx];
5748 case RXD_OPAQUE_RING_JUMBO:
5749 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5750 dest_desc = &dpr->rx_jmb[dest_idx].std;
5751 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5752 src_desc = &spr->rx_jmb[src_idx].std;
5753 src_map = &spr->rx_jmb_buffers[src_idx];
5760 dest_map->data = src_map->data;
5761 dma_unmap_addr_set(dest_map, mapping,
5762 dma_unmap_addr(src_map, mapping));
5763 dest_desc->addr_hi = src_desc->addr_hi;
5764 dest_desc->addr_lo = src_desc->addr_lo;
/* Ensure that the update to the skb happens after the physical
 * addresses have been transferred to the new BD location.
 */
smp_wmb();
5771 src_map->data = NULL;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
5798 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5800 struct tg3 *tp = tnapi->tp;
5801 u32 work_mask, rx_std_posted = 0;
5802 u32 std_prod_idx, jmb_prod_idx;
5803 u32 sw_idx = tnapi->rx_rcb_ptr;
5806 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5808 hw_idx = *(tnapi->rx_rcb_prod_idx);
/*
 * We need to order the read of hw_idx and the read of
 * the opaque cookie.
 */
rmb();
work_mask = 0;
5816 std_prod_idx = tpr->rx_std_prod_idx;
5817 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5818 while (sw_idx != hw_idx && budget > 0) {
5819 struct ring_info *ri;
5820 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5822 struct sk_buff *skb;
5823 dma_addr_t dma_addr;
5824 u32 opaque_key, desc_idx, *post_ptr;
5827 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5828 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5829 if (opaque_key == RXD_OPAQUE_RING_STD) {
5830 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5831 dma_addr = dma_unmap_addr(ri, mapping);
5833 post_ptr = &std_prod_idx;
5835 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5836 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5837 dma_addr = dma_unmap_addr(ri, mapping);
5839 post_ptr = &jmb_prod_idx;
5841 goto next_pkt_nopost;
5843 work_mask |= opaque_key;
5845 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5846 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5848 tg3_recycle_rx(tnapi, tpr, opaque_key,
5849 desc_idx, *post_ptr);
5851 /* Other statistics kept track of by card. */
5856 prefetch(data + TG3_RX_OFFSET(tp));
5857 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5860 if (len > TG3_RX_COPY_THRESH(tp)) {
5862 unsigned int frag_size;
5864 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5865 *post_ptr, &frag_size);
5869 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5870 PCI_DMA_FROMDEVICE);
5872 skb = build_skb(data, frag_size);
5874 tg3_frag_free(frag_size != 0, data);
5875 goto drop_it_no_recycle;
5877 skb_reserve(skb, TG3_RX_OFFSET(tp));
/* Ensure that the update to the data happens
 * after the usage of the old DMA mapping.
 */
5886 tg3_recycle_rx(tnapi, tpr, opaque_key,
5887 desc_idx, *post_ptr);
5889 skb = netdev_alloc_skb(tp->dev,
5890 len + TG3_RAW_IP_ALIGN);
5892 goto drop_it_no_recycle;
5894 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5895 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5897 data + TG3_RX_OFFSET(tp),
5899 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5903 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5904 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5905 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5906 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5907 skb->ip_summed = CHECKSUM_UNNECESSARY;
5909 skb_checksum_none_assert(skb);
5911 skb->protocol = eth_type_trans(skb, tp->dev);
5913 if (len > (tp->dev->mtu + ETH_HLEN) &&
5914 skb->protocol != htons(ETH_P_8021Q)) {
5916 goto drop_it_no_recycle;
5919 if (desc->type_flags & RXD_FLAG_VLAN &&
5920 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5921 __vlan_hwaccel_put_tag(skb,
5922 desc->err_vlan & RXD_VLAN_MASK);
5924 napi_gro_receive(&tnapi->napi, skb);
5932 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5933 tpr->rx_std_prod_idx = std_prod_idx &
5934 tp->rx_std_ring_mask;
5935 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5936 tpr->rx_std_prod_idx);
5937 work_mask &= ~RXD_OPAQUE_RING_STD;
5942 sw_idx &= tp->rx_ret_ring_mask;
5944 /* Refresh hw_idx to see if there is new work */
5945 if (sw_idx == hw_idx) {
5946 hw_idx = *(tnapi->rx_rcb_prod_idx);
5951 /* ACK the status ring. */
5952 tnapi->rx_rcb_ptr = sw_idx;
5953 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5955 /* Refill RX ring(s). */
5956 if (!tg3_flag(tp, ENABLE_RSS)) {
5957 /* Sync BD data before updating mailbox */
5960 if (work_mask & RXD_OPAQUE_RING_STD) {
5961 tpr->rx_std_prod_idx = std_prod_idx &
5962 tp->rx_std_ring_mask;
5963 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5964 tpr->rx_std_prod_idx);
5966 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5967 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5968 tp->rx_jmb_ring_mask;
5969 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5970 tpr->rx_jmb_prod_idx);
5973 } else if (work_mask) {
/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
 * updated before the producer indices can be updated.
 */
smp_wmb();
5979 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5980 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5982 if (tnapi != &tp->napi[1]) {
5983 tp->rx_refill = true;
5984 napi_schedule(&tp->napi[1].napi);
5991 static void tg3_poll_link(struct tg3 *tp)
5993 /* handle link change and other phy events */
5994 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5995 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5997 if (sblk->status & SD_STATUS_LINK_CHG) {
5998 sblk->status = SD_STATUS_UPDATED |
5999 (sblk->status & ~SD_STATUS_LINK_CHG);
6000 spin_lock(&tp->lock);
6001 if (tg3_flag(tp, USE_PHYLIB)) {
tw32_f(MAC_STATUS,
       (MAC_STATUS_SYNC_CHANGED |
	MAC_STATUS_CFG_CHANGED |
	MAC_STATUS_MI_COMPLETION |
	MAC_STATUS_LNKSTATE_CHANGED));
6009 tg3_setup_phy(tp, 0);
6010 spin_unlock(&tp->lock);
6015 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6016 struct tg3_rx_prodring_set *dpr,
6017 struct tg3_rx_prodring_set *spr)
6019 u32 si, di, cpycnt, src_prod_idx;
6023 src_prod_idx = spr->rx_std_prod_idx;
/* Make sure updates to the rx_std_buffers[] entries and the
 * standard producer index are seen in the correct order.
 */
smp_rmb();
6030 if (spr->rx_std_cons_idx == src_prod_idx)
6033 if (spr->rx_std_cons_idx < src_prod_idx)
6034 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6036 cpycnt = tp->rx_std_ring_mask + 1 -
6037 spr->rx_std_cons_idx;
6039 cpycnt = min(cpycnt,
6040 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6042 si = spr->rx_std_cons_idx;
6043 di = dpr->rx_std_prod_idx;
6045 for (i = di; i < di + cpycnt; i++) {
6046 if (dpr->rx_std_buffers[i].data) {
/* Ensure that updates to the rx_std_buffers ring and the
 * shadowed hardware producer ring from tg3_recycle_skb() are
 * ordered correctly WRT the skb check above.
 */
smp_rmb();
6062 memcpy(&dpr->rx_std_buffers[di],
6063 &spr->rx_std_buffers[si],
6064 cpycnt * sizeof(struct ring_info));
6066 for (i = 0; i < cpycnt; i++, di++, si++) {
6067 struct tg3_rx_buffer_desc *sbd, *dbd;
6068 sbd = &spr->rx_std[si];
6069 dbd = &dpr->rx_std[di];
6070 dbd->addr_hi = sbd->addr_hi;
6071 dbd->addr_lo = sbd->addr_lo;
6074 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6075 tp->rx_std_ring_mask;
6076 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6077 tp->rx_std_ring_mask;
6081 src_prod_idx = spr->rx_jmb_prod_idx;
/* Make sure updates to the rx_jmb_buffers[] entries and
 * the jumbo producer index are seen in the correct order.
 */
smp_rmb();
6088 if (spr->rx_jmb_cons_idx == src_prod_idx)
6091 if (spr->rx_jmb_cons_idx < src_prod_idx)
6092 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6094 cpycnt = tp->rx_jmb_ring_mask + 1 -
6095 spr->rx_jmb_cons_idx;
6097 cpycnt = min(cpycnt,
6098 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6100 si = spr->rx_jmb_cons_idx;
6101 di = dpr->rx_jmb_prod_idx;
6103 for (i = di; i < di + cpycnt; i++) {
6104 if (dpr->rx_jmb_buffers[i].data) {
/* Ensure that updates to the rx_jmb_buffers ring and the
 * shadowed hardware producer ring from tg3_recycle_skb() are
 * ordered correctly WRT the skb check above.
 */
smp_rmb();
6120 memcpy(&dpr->rx_jmb_buffers[di],
6121 &spr->rx_jmb_buffers[si],
6122 cpycnt * sizeof(struct ring_info));
6124 for (i = 0; i < cpycnt; i++, di++, si++) {
6125 struct tg3_rx_buffer_desc *sbd, *dbd;
6126 sbd = &spr->rx_jmb[si].std;
6127 dbd = &dpr->rx_jmb[di].std;
6128 dbd->addr_hi = sbd->addr_hi;
6129 dbd->addr_lo = sbd->addr_lo;
6132 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6133 tp->rx_jmb_ring_mask;
6134 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6135 tp->rx_jmb_ring_mask;
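/* Core NAPI work function shared by tg3_poll() and tg3_poll_msix():
 * reap completed TX descriptors, then process received packets within
 * the remaining budget. Under RSS, vector 1 additionally drains the
 * other vectors' producer rings back into napi[0]'s producer ring,
 * which is the one that feeds the chip.
 */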
6141 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6143 struct tg3 *tp = tnapi->tp;
6145 /* run TX completion thread */
6146 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6148 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6152 if (!tnapi->rx_rcb_prod_idx)
/* run RX thread, within the bounds set by NAPI.
 * All RX "locking" is done by ensuring outside
 * code synchronizes with tg3->napi.poll()
 */
6159 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6160 work_done += tg3_rx(tnapi, budget - work_done);
6162 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6163 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6165 u32 std_prod_idx = dpr->rx_std_prod_idx;
6166 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6168 tp->rx_refill = false;
6169 for (i = 1; i < tp->irq_cnt; i++)
6170 err |= tg3_rx_prodring_xfer(tp, dpr,
6171 &tp->napi[i].prodring);
6175 if (std_prod_idx != dpr->rx_std_prod_idx)
6176 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6177 dpr->rx_std_prod_idx);
6179 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6180 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6181 dpr->rx_jmb_prod_idx);
6186 tw32_f(HOSTCC_MODE, tp->coal_now);
6192 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6194 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6195 schedule_work(&tp->reset_task);
6198 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6200 cancel_work_sync(&tp->reset_task);
6201 tg3_flag_clear(tp, RESET_TASK_PENDING);
6202 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
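/* NAPI poll handler used for the MSI-X vectors. These vectors use
 * tagged status blocks, so the handler records status_tag before
 * re-checking for work and writes the tag back to the interrupt
 * mailbox when re-enabling interrupts.
 */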
6205 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6207 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6208 struct tg3 *tp = tnapi->tp;
6210 struct tg3_hw_status *sblk = tnapi->hw_status;
6213 work_done = tg3_poll_work(tnapi, work_done, budget);
6215 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6218 if (unlikely(work_done >= budget))
/* tp->last_tag is used in tg3_int_reenable() below
 * to tell the hw how much work has been processed,
 * so we must read it before checking for more work.
 */
6225 tnapi->last_tag = sblk->status_tag;
6226 tnapi->last_irq_tag = tnapi->last_tag;
6229 /* check for RX/TX work to do */
6230 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6231 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
/* This test here is not race free, but will reduce
 * the number of interrupts by looping again.
 */
6236 if (tnapi == &tp->napi[1] && tp->rx_refill)
6239 napi_complete(napi);
6240 /* Reenable interrupts. */
6241 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* This test here is synchronized by napi_schedule()
 * and napi_complete() to close the race condition.
 */
6246 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6247 tw32(HOSTCC_MODE, tp->coalesce_mode |
6248 HOSTCC_MODE_ENABLE |
6259 /* work_done is guaranteed to be less than budget. */
6260 napi_complete(napi);
6261 tg3_reset_task_schedule(tp);
6265 static void tg3_process_error(struct tg3 *tp)
6268 bool real_error = false;
6270 if (tg3_flag(tp, ERROR_PROCESSED))
6273 /* Check Flow Attention register */
6274 val = tr32(HOSTCC_FLOW_ATTN);
6275 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6276 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6280 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6281 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6285 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6286 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6295 tg3_flag_set(tp, ERROR_PROCESSED);
6296 tg3_reset_task_schedule(tp);
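/* NAPI poll handler for the INTx/MSI (single vector) case. Checks the
 * status block for error events first, then runs the shared
 * tg3_poll_work() loop and re-enables interrupts once no more work
 * remains within the budget.
 */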
6299 static int tg3_poll(struct napi_struct *napi, int budget)
6301 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6302 struct tg3 *tp = tnapi->tp;
6304 struct tg3_hw_status *sblk = tnapi->hw_status;
6307 if (sblk->status & SD_STATUS_ERROR)
6308 tg3_process_error(tp);
6312 work_done = tg3_poll_work(tnapi, work_done, budget);
6314 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6317 if (unlikely(work_done >= budget))
6320 if (tg3_flag(tp, TAGGED_STATUS)) {
/* tp->last_tag is used in tg3_int_reenable() below
 * to tell the hw how much work has been processed,
 * so we must read it before checking for more work.
 */
6325 tnapi->last_tag = sblk->status_tag;
6326 tnapi->last_irq_tag = tnapi->last_tag;
6329 sblk->status &= ~SD_STATUS_UPDATED;
6331 if (likely(!tg3_has_work(tnapi))) {
6332 napi_complete(napi);
6333 tg3_int_reenable(tnapi);
6341 /* work_done is guaranteed to be less than budget. */
6342 napi_complete(napi);
6343 tg3_reset_task_schedule(tp);
6347 static void tg3_napi_disable(struct tg3 *tp)
6351 for (i = tp->irq_cnt - 1; i >= 0; i--)
6352 napi_disable(&tp->napi[i].napi);
6355 static void tg3_napi_enable(struct tg3 *tp)
6359 for (i = 0; i < tp->irq_cnt; i++)
6360 napi_enable(&tp->napi[i].napi);
6363 static void tg3_napi_init(struct tg3 *tp)
6367 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6368 for (i = 1; i < tp->irq_cnt; i++)
6369 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6372 static void tg3_napi_fini(struct tg3 *tp)
6376 for (i = 0; i < tp->irq_cnt; i++)
6377 netif_napi_del(&tp->napi[i].napi);
6380 static inline void tg3_netif_stop(struct tg3 *tp)
6382 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6383 tg3_napi_disable(tp);
6384 netif_tx_disable(tp->dev);
6387 static inline void tg3_netif_start(struct tg3 *tp)
/* NOTE: unconditional netif_tx_wake_all_queues is only
 * appropriate so long as all callers are assured to
 * have free tx slots (such as after tg3_init_hw)
 */
6393 netif_tx_wake_all_queues(tp->dev);
6395 tg3_napi_enable(tp);
6396 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6397 tg3_enable_ints(tp);
6400 static void tg3_irq_quiesce(struct tg3 *tp)
6404 BUG_ON(tp->irq_sync);
6409 for (i = 0; i < tp->irq_cnt; i++)
6410 synchronize_irq(tp->napi[i].irq_vec);
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
6418 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6420 spin_lock_bh(&tp->lock);
6422 tg3_irq_quiesce(tp);
6425 static inline void tg3_full_unlock(struct tg3 *tp)
6427 spin_unlock_bh(&tp->lock);
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
6433 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6435 struct tg3_napi *tnapi = dev_id;
6436 struct tg3 *tp = tnapi->tp;
6438 prefetch(tnapi->hw_status);
6440 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6442 if (likely(!tg3_irq_sync(tp)))
6443 napi_schedule(&tnapi->napi);
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
6452 static irqreturn_t tg3_msi(int irq, void *dev_id)
6454 struct tg3_napi *tnapi = dev_id;
6455 struct tg3 *tp = tnapi->tp;
6457 prefetch(tnapi->hw_status);
6459 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/*
 * Writing any value to intr-mbox-0 clears PCI INTA# and
 * chip-internal interrupt pending events.
 * Writing non-zero to intr-mbox-0 additionally tells the
 * NIC to stop sending us irqs, engaging "in-intr-handler"
 * event coalescing.
 */
6467 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6468 if (likely(!tg3_irq_sync(tp)))
6469 napi_schedule(&tnapi->napi);
6471 return IRQ_RETVAL(1);
6474 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6476 struct tg3_napi *tnapi = dev_id;
6477 struct tg3 *tp = tnapi->tp;
6478 struct tg3_hw_status *sblk = tnapi->hw_status;
6479 unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
 * the CPU before the status block posted prior to the interrupt.
 * Reading the PCI State register will confirm whether the
 * interrupt is ours and will flush the status block.
 */
6486 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6487 if (tg3_flag(tp, CHIP_RESETTING) ||
6488 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
/*
 * Writing any value to intr-mbox-0 clears PCI INTA# and
 * chip-internal interrupt pending events.
 * Writing non-zero to intr-mbox-0 additionally tells the
 * NIC to stop sending us irqs, engaging "in-intr-handler"
 * event coalescing.
 *
 * Flush the mailbox to de-assert the IRQ immediately to prevent
 * spurious interrupts. The flush impacts performance but
 * excessive spurious interrupts can be worse in some cases.
 */
6505 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6506 if (tg3_irq_sync(tp))
6508 sblk->status &= ~SD_STATUS_UPDATED;
6509 if (likely(tg3_has_work(tnapi))) {
6510 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6511 napi_schedule(&tnapi->napi);
/* No work, shared interrupt perhaps? re-enable
 * interrupts, and flush that PCI write
 */
6516 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6520 return IRQ_RETVAL(handled);
6523 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6525 struct tg3_napi *tnapi = dev_id;
6526 struct tg3 *tp = tnapi->tp;
6527 struct tg3_hw_status *sblk = tnapi->hw_status;
6528 unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
 * the CPU before the status block posted prior to the interrupt.
 * Reading the PCI State register will confirm whether the
 * interrupt is ours and will flush the status block.
 */
6535 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6536 if (tg3_flag(tp, CHIP_RESETTING) ||
6537 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
/*
 * Writing any value to intr-mbox-0 clears PCI INTA# and
 * chip-internal interrupt pending events.
 * Writing non-zero to intr-mbox-0 additionally tells the
 * NIC to stop sending us irqs, engaging "in-intr-handler"
 * event coalescing.
 *
 * Flush the mailbox to de-assert the IRQ immediately to prevent
 * spurious interrupts. The flush impacts performance but
 * excessive spurious interrupts can be worse in some cases.
 */
6554 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/*
 * In a shared interrupt configuration, sometimes other devices'
 * interrupts will scream. We record the current status tag here
 * so that the above check can report that the screaming interrupts
 * are unhandled. Eventually they will be silenced.
 */
6562 tnapi->last_irq_tag = sblk->status_tag;
6564 if (tg3_irq_sync(tp))
6567 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6569 napi_schedule(&tnapi->napi);
6572 return IRQ_RETVAL(handled);
6575 /* ISR for interrupt test */
6576 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6578 struct tg3_napi *tnapi = dev_id;
6579 struct tg3 *tp = tnapi->tp;
6580 struct tg3_hw_status *sblk = tnapi->hw_status;
6582 if ((sblk->status & SD_STATUS_UPDATED) ||
6583 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6584 tg3_disable_ints(tp);
6585 return IRQ_RETVAL(1);
6587 return IRQ_RETVAL(0);
6590 #ifdef CONFIG_NET_POLL_CONTROLLER
6591 static void tg3_poll_controller(struct net_device *dev)
6594 struct tg3 *tp = netdev_priv(dev);
6596 for (i = 0; i < tp->irq_cnt; i++)
6597 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6601 static void tg3_tx_timeout(struct net_device *dev)
6603 struct tg3 *tp = netdev_priv(dev);
6605 if (netif_msg_tx_err(tp)) {
6606 netdev_err(dev, "transmit timed out, resetting\n");
6610 tg3_reset_task_schedule(tp);
6613 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6614 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6616 u32 base = (u32) mapping & 0xffffffff;
6618 return (base > 0xffffdcc0) && (base + len + 8 < base);
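/* The check above flags DMA buffers that straddle a 4 GB boundary.
 * "base" is the low 32 bits of the mapping; "base + len + 8 < base"
 * is true only when the 32-bit addition wraps, i.e. the buffer (plus
 * 8 bytes of slack) crosses into the next 4 GB segment. The first
 * term is just a cheap pre-filter: a buffer can only wrap if it
 * starts in the last few KB below the boundary. Example: with
 * base = 0xffffff00 and len = 0x200, base + len + 8 truncates to
 * 0x108, which is < base, so the workaround path is taken.
 */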
6621 /* Test for DMA addresses > 40-bit */
6622 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6625 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6626 if (tg3_flag(tp, 40BIT_DMA_BUG))
6627 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6634 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6635 dma_addr_t mapping, u32 len, u32 flags,
6638 txbd->addr_hi = ((u64) mapping >> 32);
6639 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6640 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6641 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6644 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6645 dma_addr_t map, u32 len, u32 flags,
6648 struct tg3 *tp = tnapi->tp;
6651 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6654 if (tg3_4g_overflow_test(map, len))
6657 if (tg3_40bit_overflow_test(tp, map, len))
6660 if (tp->dma_limit) {
6661 u32 prvidx = *entry;
6662 u32 tmp_flag = flags & ~TXD_FLAG_END;
6663 while (len > tp->dma_limit && *budget) {
6664 u32 frag_len = tp->dma_limit;
6665 len -= tp->dma_limit;
6667 /* Avoid the 8byte DMA problem */
6669 len += tp->dma_limit / 2;
6670 frag_len = tp->dma_limit / 2;
6673 tnapi->tx_buffers[*entry].fragmented = true;
6675 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6676 frag_len, tmp_flag, mss, vlan);
6679 *entry = NEXT_TX(*entry);
6686 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6687 len, flags, mss, vlan);
6689 *entry = NEXT_TX(*entry);
6692 tnapi->tx_buffers[prvidx].fragmented = false;
6696 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6697 len, flags, mss, vlan);
6698 *entry = NEXT_TX(*entry);
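/* Undo the DMA mappings for one transmitted (or abandoned) skb,
 * starting at "entry": unmap the linear part, then "last + 1" page
 * fragments, skipping any extra ring slots that were consumed by the
 * dma_limit fragmentation logic in tg3_tx_frag_set() above.
 */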
6704 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6707 struct sk_buff *skb;
6708 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6713 pci_unmap_single(tnapi->tp->pdev,
6714 dma_unmap_addr(txb, mapping),
6718 while (txb->fragmented) {
6719 txb->fragmented = false;
6720 entry = NEXT_TX(entry);
6721 txb = &tnapi->tx_buffers[entry];
6724 for (i = 0; i <= last; i++) {
6725 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6727 entry = NEXT_TX(entry);
6728 txb = &tnapi->tx_buffers[entry];
6730 pci_unmap_page(tnapi->tp->pdev,
6731 dma_unmap_addr(txb, mapping),
6732 skb_frag_size(frag), PCI_DMA_TODEVICE);
6734 while (txb->fragmented) {
6735 txb->fragmented = false;
6736 entry = NEXT_TX(entry);
6737 txb = &tnapi->tx_buffers[entry];
6742 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6743 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6744 struct sk_buff **pskb,
6745 u32 *entry, u32 *budget,
6746 u32 base_flags, u32 mss, u32 vlan)
6748 struct tg3 *tp = tnapi->tp;
6749 struct sk_buff *new_skb, *skb = *pskb;
6750 dma_addr_t new_addr = 0;
6753 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6754 new_skb = skb_copy(skb, GFP_ATOMIC);
6756 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6758 new_skb = skb_copy_expand(skb,
6759 skb_headroom(skb) + more_headroom,
6760 skb_tailroom(skb), GFP_ATOMIC);
6766 /* New SKB is guaranteed to be linear. */
6767 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6769 /* Make sure the mapping succeeded */
6770 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6771 dev_kfree_skb(new_skb);
6774 u32 save_entry = *entry;
6776 base_flags |= TXD_FLAG_END;
6778 tnapi->tx_buffers[*entry].skb = new_skb;
6779 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6782 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6783 new_skb->len, base_flags,
6785 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6786 dev_kfree_skb(new_skb);
6797 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
6802 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6804 struct sk_buff *segs, *nskb;
6805 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6807 /* Estimate the number of fragments in the worst case */
6808 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6809 netif_stop_queue(tp->dev);
/* netif_tx_stop_queue() must be done before checking
 * tx index in tg3_tx_avail() below, because in
 * tg3_tx(), we update tx index before checking for
 * netif_tx_queue_stopped().
 */
smp_mb();
6817 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6818 return NETDEV_TX_BUSY;
6820 netif_wake_queue(tp->dev);
6823 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6825 goto tg3_tso_bug_end;
6831 tg3_start_xmit(nskb, tp->dev);
6837 return NETDEV_TX_OK;
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
6843 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6845 struct tg3 *tp = netdev_priv(dev);
6846 u32 len, entry, base_flags, mss, vlan = 0;
6848 int i = -1, would_hit_hwbug;
6850 struct tg3_napi *tnapi;
6851 struct netdev_queue *txq;
6854 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6855 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6856 if (tg3_flag(tp, ENABLE_TSS))
6859 budget = tg3_tx_avail(tnapi);
/* We are running in BH disabled context with netif_tx_lock
 * and TX reclaim runs via tp->napi.poll inside of a software
 * interrupt. Furthermore, IRQ processing runs lockless so we have
 * no IRQ context deadlocks to worry about either. Rejoice!
 */
6866 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6867 if (!netif_tx_queue_stopped(txq)) {
6868 netif_tx_stop_queue(txq);
6870 /* This is a hard error, log it. */
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
6874 return NETDEV_TX_BUSY;
6877 entry = tnapi->tx_prod;
6879 if (skb->ip_summed == CHECKSUM_PARTIAL)
6880 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6882 mss = skb_shinfo(skb)->gso_size;
6885 u32 tcp_opt_len, hdr_len;
6887 if (skb_header_cloned(skb) &&
6888 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6892 tcp_opt_len = tcp_optlen(skb);
6894 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6896 if (!skb_is_gso_v6(skb)) {
6898 iph->tot_len = htons(mss + hdr_len);
6901 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6902 tg3_flag(tp, TSO_BUG))
6903 return tg3_tso_bug(tp, skb);
6905 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6906 TXD_FLAG_CPU_POST_DMA);
6908 if (tg3_flag(tp, HW_TSO_1) ||
6909 tg3_flag(tp, HW_TSO_2) ||
6910 tg3_flag(tp, HW_TSO_3)) {
6911 tcp_hdr(skb)->check = 0;
6912 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6914 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6919 if (tg3_flag(tp, HW_TSO_3)) {
6920 mss |= (hdr_len & 0xc) << 12;
6922 base_flags |= 0x00000010;
6923 base_flags |= (hdr_len & 0x3e0) << 5;
6924 } else if (tg3_flag(tp, HW_TSO_2))
6925 mss |= hdr_len << 9;
6926 else if (tg3_flag(tp, HW_TSO_1) ||
6927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6928 if (tcp_opt_len || iph->ihl > 5) {
6931 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6932 mss |= (tsflags << 11);
6935 if (tcp_opt_len || iph->ihl > 5) {
6938 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6939 base_flags |= tsflags << 12;
6944 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6945 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6946 base_flags |= TXD_FLAG_JMB_PKT;
6948 if (vlan_tx_tag_present(skb)) {
6949 base_flags |= TXD_FLAG_VLAN;
6950 vlan = vlan_tx_tag_get(skb);
6953 len = skb_headlen(skb);
6955 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6956 if (pci_dma_mapping_error(tp->pdev, mapping))
6960 tnapi->tx_buffers[entry].skb = skb;
6961 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6963 would_hit_hwbug = 0;
6965 if (tg3_flag(tp, 5701_DMA_BUG))
6966 would_hit_hwbug = 1;
6968 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6969 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6971 would_hit_hwbug = 1;
6972 } else if (skb_shinfo(skb)->nr_frags > 0) {
6975 if (!tg3_flag(tp, HW_TSO_1) &&
6976 !tg3_flag(tp, HW_TSO_2) &&
6977 !tg3_flag(tp, HW_TSO_3))
/* Now loop through additional data
 * fragments, and queue them.
 */
6983 last = skb_shinfo(skb)->nr_frags - 1;
6984 for (i = 0; i <= last; i++) {
6985 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6987 len = skb_frag_size(frag);
6988 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6989 len, DMA_TO_DEVICE);
6991 tnapi->tx_buffers[entry].skb = NULL;
6992 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6994 if (dma_mapping_error(&tp->pdev->dev, mapping))
6998 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7000 ((i == last) ? TXD_FLAG_END : 0),
7002 would_hit_hwbug = 1;
7008 if (would_hit_hwbug) {
7009 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
/* If the workaround fails due to memory/mapping
 * failure, silently drop this packet.
 */
7014 entry = tnapi->tx_prod;
7015 budget = tg3_tx_avail(tnapi);
7016 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7017 base_flags, mss, vlan))
7021 skb_tx_timestamp(skb);
7022 netdev_tx_sent_queue(txq, skb->len);
7024 /* Sync BD data before updating mailbox */
7027 /* Packets are ready, update Tx producer idx local and on card. */
7028 tw32_tx_mbox(tnapi->prodmbox, entry);
7030 tnapi->tx_prod = entry;
7031 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7032 netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
 * tx index in tg3_tx_avail() below, because in
 * tg3_tx(), we update tx index before checking for
 * netif_tx_queue_stopped().
 */
smp_mb();
7040 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7041 netif_tx_wake_queue(txq);
7045 return NETDEV_TX_OK;
7048 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7049 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7054 return NETDEV_TX_OK;
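/* Enable or disable internal MAC loopback by toggling
 * MAC_MODE_PORT_INT_LPBACK and selecting an appropriate port mode
 * (MII for 10/100-only PHYs, GMII otherwise). Used by the
 * NETIF_F_LOOPBACK feature handler, tg3_set_loopback(), below.
 */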
7057 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7060 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7061 MAC_MODE_PORT_MODE_MASK);
7063 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7065 if (!tg3_flag(tp, 5705_PLUS))
7066 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7068 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7069 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7071 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7073 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7075 if (tg3_flag(tp, 5705_PLUS) ||
7076 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7078 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7081 tw32(MAC_MODE, tp->mac_mode);
7085 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7087 u32 val, bmcr, mac_mode, ptest = 0;
7089 tg3_phy_toggle_apd(tp, false);
7090 tg3_phy_toggle_automdix(tp, 0);
7092 if (extlpbk && tg3_phy_set_extloopbk(tp))
7095 bmcr = BMCR_FULLDPLX;
7100 bmcr |= BMCR_SPEED100;
7104 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7106 bmcr |= BMCR_SPEED100;
7109 bmcr |= BMCR_SPEED1000;
7114 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7115 tg3_readphy(tp, MII_CTRL1000, &val);
7116 val |= CTL1000_AS_MASTER |
7117 CTL1000_ENABLE_MASTER;
7118 tg3_writephy(tp, MII_CTRL1000, val);
7120 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7121 MII_TG3_FET_PTEST_TRIM_2;
7122 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7125 bmcr |= BMCR_LOOPBACK;
7127 tg3_writephy(tp, MII_BMCR, bmcr);
7129 /* The write needs to be flushed for the FETs */
7130 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7131 tg3_readphy(tp, MII_BMCR, &bmcr);
7135 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7137 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7138 MII_TG3_FET_PTEST_FRC_TX_LINK |
7139 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7141 /* The write needs to be flushed for the AC131 */
7142 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7145 /* Reset to prevent losing 1st rx packet intermittently */
7146 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7147 tg3_flag(tp, 5780_CLASS)) {
7148 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7150 tw32_f(MAC_RX_MODE, tp->rx_mode);
7153 mac_mode = tp->mac_mode &
7154 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7155 if (speed == SPEED_1000)
7156 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7158 mac_mode |= MAC_MODE_PORT_MODE_MII;
7160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7161 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7163 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7164 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7165 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7166 mac_mode |= MAC_MODE_LINK_POLARITY;
7168 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7169 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7172 tw32(MAC_MODE, mac_mode);
7178 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7180 struct tg3 *tp = netdev_priv(dev);
7182 if (features & NETIF_F_LOOPBACK) {
7183 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7186 spin_lock_bh(&tp->lock);
7187 tg3_mac_loopback(tp, true);
7188 netif_carrier_on(tp->dev);
7189 spin_unlock_bh(&tp->lock);
7190 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7192 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7195 spin_lock_bh(&tp->lock);
7196 tg3_mac_loopback(tp, false);
7197 /* Force link status check */
7198 tg3_setup_phy(tp, 1);
7199 spin_unlock_bh(&tp->lock);
7200 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7204 static netdev_features_t tg3_fix_features(struct net_device *dev,
7205 netdev_features_t features)
7207 struct tg3 *tp = netdev_priv(dev);
7209 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7210 features &= ~NETIF_F_ALL_TSO;
7215 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7217 netdev_features_t changed = dev->features ^ features;
7219 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7220 tg3_set_loopback(dev, features);
7225 static void tg3_rx_prodring_free(struct tg3 *tp,
7226 struct tg3_rx_prodring_set *tpr)
7230 if (tpr != &tp->napi[0].prodring) {
7231 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7232 i = (i + 1) & tp->rx_std_ring_mask)
7233 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7236 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7237 for (i = tpr->rx_jmb_cons_idx;
7238 i != tpr->rx_jmb_prod_idx;
7239 i = (i + 1) & tp->rx_jmb_ring_mask) {
7240 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7248 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7249 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7252 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7253 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7254 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7259 /* Initialize rx rings for packet processing.
7261 * The chip has been shut down and the driver detached from
7262 * the network stack, so no interrupts or new tx packets will
7263 * end up in the driver. tp->{tx,}lock are held and thus
7266 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7267 struct tg3_rx_prodring_set *tpr)
7269 u32 i, rx_pkt_dma_sz;
7271 tpr->rx_std_cons_idx = 0;
7272 tpr->rx_std_prod_idx = 0;
7273 tpr->rx_jmb_cons_idx = 0;
7274 tpr->rx_jmb_prod_idx = 0;
7276 if (tpr != &tp->napi[0].prodring) {
7277 memset(&tpr->rx_std_buffers[0], 0,
7278 TG3_RX_STD_BUFF_RING_SIZE(tp));
7279 if (tpr->rx_jmb_buffers)
7280 memset(&tpr->rx_jmb_buffers[0], 0,
7281 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7285 /* Zero out all descriptors. */
7286 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7288 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7289 if (tg3_flag(tp, 5780_CLASS) &&
7290 tp->dev->mtu > ETH_DATA_LEN)
7291 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7292 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7294 /* Initialize invariants of the rings; we only set this
7295 * stuff once. This works because the card does not
7296 * write into the rx buffer posting rings.
7298 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7299 struct tg3_rx_buffer_desc *rxd;
7301 rxd = &tpr->rx_std[i];
7302 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7303 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7304 rxd->opaque = (RXD_OPAQUE_RING_STD |
7305 (i << RXD_OPAQUE_INDEX_SHIFT));
7308 /* Now allocate fresh SKBs for each rx ring. */
7309 for (i = 0; i < tp->rx_pending; i++) {
7310 unsigned int frag_size;
7312 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7314 netdev_warn(tp->dev,
7315 "Using a smaller RX standard ring. Only "
7316 "%d out of %d buffers were allocated "
7317 "successfully\n", i, tp->rx_pending);
7325 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7328 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7330 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7333 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7334 struct tg3_rx_buffer_desc *rxd;
7336 rxd = &tpr->rx_jmb[i].std;
7337 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7338 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7340 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7341 (i << RXD_OPAQUE_INDEX_SHIFT));
7344 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7345 unsigned int frag_size;
7347 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7349 netdev_warn(tp->dev,
7350 "Using a smaller RX jumbo ring. Only %d "
7351 "out of %d buffers were allocated "
7352 "successfully\n", i, tp->rx_jumbo_pending);
7355 tp->rx_jumbo_pending = i;
7364 tg3_rx_prodring_free(tp, tpr);
7368 static void tg3_rx_prodring_fini(struct tg3 *tp,
7369 struct tg3_rx_prodring_set *tpr)
7371 kfree(tpr->rx_std_buffers);
7372 tpr->rx_std_buffers = NULL;
7373 kfree(tpr->rx_jmb_buffers);
7374 tpr->rx_jmb_buffers = NULL;
7376 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7377 tpr->rx_std, tpr->rx_std_mapping);
7381 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7382 tpr->rx_jmb, tpr->rx_jmb_mapping);
7387 static int tg3_rx_prodring_init(struct tg3 *tp,
7388 struct tg3_rx_prodring_set *tpr)
7390 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7392 if (!tpr->rx_std_buffers)
7395 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7396 TG3_RX_STD_RING_BYTES(tp),
7397 &tpr->rx_std_mapping,
7402 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7403 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7405 if (!tpr->rx_jmb_buffers)
7408 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7409 TG3_RX_JMB_RING_BYTES(tp),
7410 &tpr->rx_jmb_mapping,
7419 tg3_rx_prodring_fini(tp, tpr);
7423 /* Free up pending packets in all rx/tx rings.
7425 * The chip has been shut down and the driver detached from
7426 * the network stack, so no interrupts or new tx packets will
7427 * end up in the driver. tp->{tx,}lock is not held and we are not
7428 * in an interrupt context and thus may sleep.
7430 static void tg3_free_rings(struct tg3 *tp)
7434 for (j = 0; j < tp->irq_cnt; j++) {
7435 struct tg3_napi *tnapi = &tp->napi[j];
7437 tg3_rx_prodring_free(tp, &tnapi->prodring);
7439 if (!tnapi->tx_buffers)
7442 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7443 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7448 tg3_tx_skb_unmap(tnapi, i,
7449 skb_shinfo(skb)->nr_frags - 1);
7451 dev_kfree_skb_any(skb);
7453 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7457 /* Initialize tx/rx rings for packet processing.
7459 * The chip has been shut down and the driver detached from
7460 * the network stack, so no interrupts or new tx packets will
7461 * end up in the driver. tp->{tx,}lock are held and thus
7464 static int tg3_init_rings(struct tg3 *tp)
7468 /* Free up all the SKBs. */
7471 for (i = 0; i < tp->irq_cnt; i++) {
7472 struct tg3_napi *tnapi = &tp->napi[i];
7474 tnapi->last_tag = 0;
7475 tnapi->last_irq_tag = 0;
7476 tnapi->hw_status->status = 0;
7477 tnapi->hw_status->status_tag = 0;
7478 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7483 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7485 tnapi->rx_rcb_ptr = 0;
7487 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7489 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7499 * Must not be invoked with interrupt sources disabled and
7500 * the hardware shut down.
7502 static void tg3_free_consistent(struct tg3 *tp)
7506 for (i = 0; i < tp->irq_cnt; i++) {
7507 struct tg3_napi *tnapi = &tp->napi[i];
7509 if (tnapi->tx_ring) {
7510 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7511 tnapi->tx_ring, tnapi->tx_desc_mapping);
7512 tnapi->tx_ring = NULL;
7515 kfree(tnapi->tx_buffers);
7516 tnapi->tx_buffers = NULL;
7518 if (tnapi->rx_rcb) {
7519 dma_free_coherent(&tp->pdev->dev,
7520 TG3_RX_RCB_RING_BYTES(tp),
7522 tnapi->rx_rcb_mapping);
7523 tnapi->rx_rcb = NULL;
7526 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7528 if (tnapi->hw_status) {
7529 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7531 tnapi->status_mapping);
7532 tnapi->hw_status = NULL;
7537 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7538 tp->hw_stats, tp->stats_mapping);
7539 tp->hw_stats = NULL;
7544 * Must not be invoked with interrupt sources disabled and
7545 * the hardware shut down. Can sleep.
7547 static int tg3_alloc_consistent(struct tg3 *tp)
7551 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7552 sizeof(struct tg3_hw_stats),
7558 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7560 for (i = 0; i < tp->irq_cnt; i++) {
7561 struct tg3_napi *tnapi = &tp->napi[i];
7562 struct tg3_hw_status *sblk;
7564 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7566 &tnapi->status_mapping,
7568 if (!tnapi->hw_status)
7571 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7572 sblk = tnapi->hw_status;
7574 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7577 /* If multivector TSS is enabled, vector 0 does not handle
7578 * tx interrupts. Don't allocate any resources for it.
7580 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7581 (i && tg3_flag(tp, ENABLE_TSS))) {
7582 tnapi->tx_buffers = kzalloc(
7583 sizeof(struct tg3_tx_ring_info) *
7584 TG3_TX_RING_SIZE, GFP_KERNEL);
7585 if (!tnapi->tx_buffers)
7588 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7590 &tnapi->tx_desc_mapping,
7592 if (!tnapi->tx_ring)
7597 * When RSS is enabled, the status block format changes
7598 * slightly. The "rx_jumbo_consumer", "reserved",
7599 * and "rx_mini_consumer" members get mapped to the
7600 * other three rx return ring producer indexes.
7604 if (tg3_flag(tp, ENABLE_RSS)) {
7605 tnapi->rx_rcb_prod_idx = NULL;
7610 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7613 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7616 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7619 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7624 * If multivector RSS is enabled, vector 0 does not handle
7625 * rx or tx interrupts. Don't allocate any resources for it.
7627 if (!i && tg3_flag(tp, ENABLE_RSS))
7630 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7631 TG3_RX_RCB_RING_BYTES(tp),
7632 &tnapi->rx_rcb_mapping,
7637 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7643 tg3_free_consistent(tp);
7647 #define MAX_WAIT_CNT 1000
7649 /* To stop a block, clear the enable bit and poll till it
7650 * clears. tp->lock is held.
7652 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7657 if (tg3_flag(tp, 5705_PLUS)) {
7664 /* We can't enable/disable these bits of the
7665 * 5705/5750, just say success.
7678 for (i = 0; i < MAX_WAIT_CNT; i++) {
7681 if ((val & enable_bit) == 0)
7685 if (i == MAX_WAIT_CNT && !silent) {
7686 dev_err(&tp->pdev->dev,
7687 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7695 /* tp->lock is held. */
7696 static int tg3_abort_hw(struct tg3 *tp, int silent)
7700 tg3_disable_ints(tp);
7702 tp->rx_mode &= ~RX_MODE_ENABLE;
7703 tw32_f(MAC_RX_MODE, tp->rx_mode);
7706 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7707 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7708 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7709 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7710 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7711 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7713 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7714 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7715 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7716 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7717 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7718 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7719 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7721 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7722 tw32_f(MAC_MODE, tp->mac_mode);
7725 tp->tx_mode &= ~TX_MODE_ENABLE;
7726 tw32_f(MAC_TX_MODE, tp->tx_mode);
7728 for (i = 0; i < MAX_WAIT_CNT; i++) {
7730 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7733 if (i >= MAX_WAIT_CNT) {
7734 dev_err(&tp->pdev->dev,
7735 "%s timed out, TX_MODE_ENABLE will not clear "
7736 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7740 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7741 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7742 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7744 tw32(FTQ_RESET, 0xffffffff);
7745 tw32(FTQ_RESET, 0x00000000);
7747 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7748 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7750 for (i = 0; i < tp->irq_cnt; i++) {
7751 struct tg3_napi *tnapi = &tp->napi[i];
7752 if (tnapi->hw_status)
7753 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7759 /* Save PCI command register before chip reset */
7760 static void tg3_save_pci_state(struct tg3 *tp)
7762 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7765 /* Restore PCI state after chip reset */
7766 static void tg3_restore_pci_state(struct tg3 *tp)
7770 /* Re-enable indirect register accesses. */
7771 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7772 tp->misc_host_ctrl);
7774 /* Set MAX PCI retry to zero. */
7775 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7776 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7777 tg3_flag(tp, PCIX_MODE))
7778 val |= PCISTATE_RETRY_SAME_DMA;
7779 /* Allow reads and writes to the APE register and memory space. */
7780 if (tg3_flag(tp, ENABLE_APE))
7781 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7782 PCISTATE_ALLOW_APE_SHMEM_WR |
7783 PCISTATE_ALLOW_APE_PSPACE_WR;
7784 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7786 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7788 if (!tg3_flag(tp, PCI_EXPRESS)) {
7789 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7790 tp->pci_cacheline_sz);
7791 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7795 /* Make sure PCI-X relaxed ordering bit is clear. */
7796 if (tg3_flag(tp, PCIX_MODE)) {
7799 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7801 pcix_cmd &= ~PCI_X_CMD_ERO;
7802 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7806 if (tg3_flag(tp, 5780_CLASS)) {
7808 /* Chip reset on 5780 will reset MSI enable bit,
7809 * so we need to restore it.
7811 if (tg3_flag(tp, USING_MSI)) {
7814 pci_read_config_word(tp->pdev,
7815 tp->msi_cap + PCI_MSI_FLAGS,
7817 pci_write_config_word(tp->pdev,
7818 tp->msi_cap + PCI_MSI_FLAGS,
7819 ctrl | PCI_MSI_FLAGS_ENABLE);
7820 val = tr32(MSGINT_MODE);
7821 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7826 /* tp->lock is held. */
7827 static int tg3_chip_reset(struct tg3 *tp)
7830 void (*write_op)(struct tg3 *, u32, u32);
7835 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7837 /* No matching tg3_nvram_unlock() after this because
7838 * chip reset below will undo the nvram lock.
7840 tp->nvram_lock_cnt = 0;
7842 /* GRC_MISC_CFG core clock reset will clear the memory
7843 * enable bit in PCI register 4 and the MSI enable bit
7844 * on some chips, so we save relevant registers here.
7846 tg3_save_pci_state(tp);
7848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7849 tg3_flag(tp, 5755_PLUS))
7850 tw32(GRC_FASTBOOT_PC, 0);
7853 * We must avoid the readl() that normally takes place.
7854 * It locks machines, causes machine checks, and other
7855 * fun things. So, temporarily disable the 5701
7856 * hardware workaround, while we do the reset.
7858 write_op = tp->write32;
7859 if (write_op == tg3_write_flush_reg32)
7860 tp->write32 = tg3_write32;
7862 /* Prevent the irq handler from reading or writing PCI registers
7863 * during chip reset when the memory enable bit in the PCI command
7864 * register may be cleared. The chip does not generate interrupt
7865 * at this time, but the irq handler may still be called due to irq
7866 * sharing or irqpoll.
7868 tg3_flag_set(tp, CHIP_RESETTING);
7869 for (i = 0; i < tp->irq_cnt; i++) {
7870 struct tg3_napi *tnapi = &tp->napi[i];
7871 if (tnapi->hw_status) {
7872 tnapi->hw_status->status = 0;
7873 tnapi->hw_status->status_tag = 0;
7875 tnapi->last_tag = 0;
7876 tnapi->last_irq_tag = 0;
7880 for (i = 0; i < tp->irq_cnt; i++)
7881 synchronize_irq(tp->napi[i].irq_vec);
7883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7884 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7885 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7889 val = GRC_MISC_CFG_CORECLK_RESET;
7891 if (tg3_flag(tp, PCI_EXPRESS)) {
7892 /* Force PCIe 1.0a mode */
7893 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7894 !tg3_flag(tp, 57765_PLUS) &&
7895 tr32(TG3_PCIE_PHY_TSTCTL) ==
7896 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7897 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7899 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7900 tw32(GRC_MISC_CFG, (1 << 29));
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7906 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7907 tw32(GRC_VCPU_EXT_CTRL,
7908 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7911 /* Manage gphy power for all CPMU absent PCIe devices. */
7912 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7913 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7915 tw32(GRC_MISC_CFG, val);
7917 /* restore 5701 hardware bug workaround write method */
7918 tp->write32 = write_op;
7920 /* Unfortunately, we have to delay before the PCI read back.
7921 * Some 575X chips even will not respond to a PCI cfg access
7922 * when the reset command is given to the chip.
7924 * How do these hardware designers expect things to work
7925 * properly if the PCI write is posted for a long period
7926 * of time? It is always necessary to have some method by
7927 * which a register read back can occur to push out the
7928 * write that performs the reset.
7930 * For most tg3 variants the trick below was working.
7935 /* Flush PCI posted writes. The normal MMIO registers
7936 * are inaccessible at this time so this is the only
7937 * way to do this reliably (actually, this is no longer
7938 * the case, see above). I tried to use indirect
7939 * register read/write but this upset some 5701 variants.
7941 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7945 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7948 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7952 /* Wait for link training to complete. */
7953 for (i = 0; i < 5000; i++)
7956 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7957 pci_write_config_dword(tp->pdev, 0xc4,
7958 cfg_val | (1 << 15));
7961 /* Clear the "no snoop" and "relaxed ordering" bits. */
7962 pci_read_config_word(tp->pdev,
7963 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7965 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7966 PCI_EXP_DEVCTL_NOSNOOP_EN);
7968 * Older PCIe devices only support the 128-byte
7969 * MPS setting. Enforce the restriction.
7971 if (!tg3_flag(tp, CPMU_PRESENT))
7972 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7973 pci_write_config_word(tp->pdev,
7974 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7977 /* Clear error status */
7978 pci_write_config_word(tp->pdev,
7979 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7980 PCI_EXP_DEVSTA_CED |
7981 PCI_EXP_DEVSTA_NFED |
7982 PCI_EXP_DEVSTA_FED |
7983 PCI_EXP_DEVSTA_URD);
7986 tg3_restore_pci_state(tp);
7988 tg3_flag_clear(tp, CHIP_RESETTING);
7989 tg3_flag_clear(tp, ERROR_PROCESSED);
7992 if (tg3_flag(tp, 5780_CLASS))
7993 val = tr32(MEMARB_MODE);
7994 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7996 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7998 tw32(0x5000, 0x400);
8001 tw32(GRC_MODE, tp->grc_mode);
8003 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8006 tw32(0xc4, val | (1 << 15));
8009 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8011 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8012 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8013 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8014 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8018 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8020 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8021 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8026 tw32_f(MAC_MODE, val);
8029 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8031 err = tg3_poll_fw(tp);
8037 if (tg3_flag(tp, PCI_EXPRESS) &&
8038 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8039 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8040 !tg3_flag(tp, 57765_PLUS)) {
8043 tw32(0x7c00, val | (1 << 25));
8046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8047 val = tr32(TG3_CPMU_CLCK_ORIDE);
8048 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8051 /* Reprobe ASF enable state. */
8052 tg3_flag_clear(tp, ENABLE_ASF);
8053 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8054 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8055 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8058 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8059 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8060 tg3_flag_set(tp, ENABLE_ASF);
8061 tp->last_event_jiffies = jiffies;
8062 if (tg3_flag(tp, 5750_PLUS))
8063 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8070 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8071 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8073 /* tp->lock is held. */
8074 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8080 tg3_write_sig_pre_reset(tp, kind);
8082 tg3_abort_hw(tp, silent);
8083 err = tg3_chip_reset(tp);
8085 __tg3_set_mac_addr(tp, 0);
8087 tg3_write_sig_legacy(tp, kind);
8088 tg3_write_sig_post_reset(tp, kind);
8091 /* Save the stats across chip resets... */
8092 tg3_get_nstats(tp, &tp->net_stats_prev);
8093 tg3_get_estats(tp, &tp->estats_prev);
8095 /* And make sure the next sample is new data */
8096 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8105 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8107 struct tg3 *tp = netdev_priv(dev);
8108 struct sockaddr *addr = p;
8109 int err = 0, skip_mac_1 = 0;
8111 if (!is_valid_ether_addr(addr->sa_data))
8112 return -EADDRNOTAVAIL;
8114 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8116 if (!netif_running(dev))
8119 if (tg3_flag(tp, ENABLE_ASF)) {
8120 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8122 addr0_high = tr32(MAC_ADDR_0_HIGH);
8123 addr0_low = tr32(MAC_ADDR_0_LOW);
8124 addr1_high = tr32(MAC_ADDR_1_HIGH);
8125 addr1_low = tr32(MAC_ADDR_1_LOW);
8127 /* Skip MAC addr 1 if ASF is using it. */
8128 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8129 !(addr1_high == 0 && addr1_low == 0))
8132 spin_lock_bh(&tp->lock);
8133 __tg3_set_mac_addr(tp, skip_mac_1);
8134 spin_unlock_bh(&tp->lock);
8139 /* tp->lock is held. */
8140 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8141 dma_addr_t mapping, u32 maxlen_flags,
8145 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8146 ((u64) mapping >> 32));
8148 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8149 ((u64) mapping & 0xffffffff));
8151 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8154 if (!tg3_flag(tp, 5705_PLUS))
8156 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
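/* Editorial sketch (not driver code): the BDINFO host-address writes above
 * are just a 64-bit DMA address split into two 32-bit halves, written to the
 * HIGH and LOW register offsets.  Helper names here are illustrative only.
 */
static inline void dma_addr_split_sketch(u64 mapping, u32 *hi, u32 *lo)
{
	*hi = (u32)(mapping >> 32);         /* upper 32 bits of the address */
	*lo = (u32)(mapping & 0xffffffff);  /* lower 32 bits of the address */
}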
8160 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8164 if (!tg3_flag(tp, ENABLE_TSS)) {
8165 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8166 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8167 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8169 tw32(HOSTCC_TXCOL_TICKS, 0);
8170 tw32(HOSTCC_TXMAX_FRAMES, 0);
8171 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8174 if (!tg3_flag(tp, ENABLE_RSS)) {
8175 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8176 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8177 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8179 tw32(HOSTCC_RXCOL_TICKS, 0);
8180 tw32(HOSTCC_RXMAX_FRAMES, 0);
8181 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8184 if (!tg3_flag(tp, 5705_PLUS)) {
8185 u32 val = ec->stats_block_coalesce_usecs;
8187 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8188 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8190 if (!netif_carrier_ok(tp->dev))
8193 tw32(HOSTCC_STAT_COAL_TICKS, val);
8196 for (i = 0; i < tp->irq_cnt - 1; i++) {
8199 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8200 tw32(reg, ec->rx_coalesce_usecs);
8201 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8202 tw32(reg, ec->rx_max_coalesced_frames);
8203 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8204 tw32(reg, ec->rx_max_coalesced_frames_irq);
8206 if (tg3_flag(tp, ENABLE_TSS)) {
8207 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8208 tw32(reg, ec->tx_coalesce_usecs);
8209 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8210 tw32(reg, ec->tx_max_coalesced_frames);
8211 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8212 tw32(reg, ec->tx_max_coalesced_frames_irq);
8216 for (; i < tp->irq_max - 1; i++) {
8217 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8218 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8219 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8221 if (tg3_flag(tp, ENABLE_TSS)) {
8222 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8223 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8224 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8229 /* tp->lock is held. */
8230 static void tg3_rings_reset(struct tg3 *tp)
8233 u32 stblk, txrcb, rxrcb, limit;
8234 struct tg3_napi *tnapi = &tp->napi[0];
8236 /* Disable all transmit rings but the first. */
8237 if (!tg3_flag(tp, 5705_PLUS))
8238 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8239 else if (tg3_flag(tp, 5717_PLUS))
8240 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8241 else if (tg3_flag(tp, 57765_CLASS))
8242 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8244 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8246 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8247 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8248 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8249 BDINFO_FLAGS_DISABLED);
8252 /* Disable all receive return rings but the first. */
8253 if (tg3_flag(tp, 5717_PLUS))
8254 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8255 else if (!tg3_flag(tp, 5705_PLUS))
8256 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8257 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8258 tg3_flag(tp, 57765_CLASS))
8259 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8261 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8263 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8264 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8265 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8266 BDINFO_FLAGS_DISABLED);
8268 /* Disable interrupts */
8269 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8270 tp->napi[0].chk_msi_cnt = 0;
8271 tp->napi[0].last_rx_cons = 0;
8272 tp->napi[0].last_tx_cons = 0;
8274 /* Zero mailbox registers. */
8275 if (tg3_flag(tp, SUPPORT_MSIX)) {
8276 for (i = 1; i < tp->irq_max; i++) {
8277 tp->napi[i].tx_prod = 0;
8278 tp->napi[i].tx_cons = 0;
8279 if (tg3_flag(tp, ENABLE_TSS))
8280 tw32_mailbox(tp->napi[i].prodmbox, 0);
8281 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8282 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8283 tp->napi[i].chk_msi_cnt = 0;
8284 tp->napi[i].last_rx_cons = 0;
8285 tp->napi[i].last_tx_cons = 0;
8287 if (!tg3_flag(tp, ENABLE_TSS))
8288 tw32_mailbox(tp->napi[0].prodmbox, 0);
8290 tp->napi[0].tx_prod = 0;
8291 tp->napi[0].tx_cons = 0;
8292 tw32_mailbox(tp->napi[0].prodmbox, 0);
8293 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8296 /* Make sure the NIC-based send BD rings are disabled. */
8297 if (!tg3_flag(tp, 5705_PLUS)) {
8298 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8299 for (i = 0; i < 16; i++)
8300 tw32_tx_mbox(mbox + i * 8, 0);
8303 txrcb = NIC_SRAM_SEND_RCB;
8304 rxrcb = NIC_SRAM_RCV_RET_RCB;
8306 /* Clear status block in ram. */
8307 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8309 /* Set status block DMA address */
8310 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8311 ((u64) tnapi->status_mapping >> 32));
8312 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8313 ((u64) tnapi->status_mapping & 0xffffffff));
8315 if (tnapi->tx_ring) {
8316 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8317 (TG3_TX_RING_SIZE <<
8318 BDINFO_FLAGS_MAXLEN_SHIFT),
8319 NIC_SRAM_TX_BUFFER_DESC);
8320 txrcb += TG3_BDINFO_SIZE;
8323 if (tnapi->rx_rcb) {
8324 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8325 (tp->rx_ret_ring_mask + 1) <<
8326 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8327 rxrcb += TG3_BDINFO_SIZE;
8330 stblk = HOSTCC_STATBLCK_RING1;
8332 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8333 u64 mapping = (u64)tnapi->status_mapping;
8334 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8335 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8337 /* Clear status block in ram. */
8338 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8340 if (tnapi->tx_ring) {
8341 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8342 (TG3_TX_RING_SIZE <<
8343 BDINFO_FLAGS_MAXLEN_SHIFT),
8344 NIC_SRAM_TX_BUFFER_DESC);
8345 txrcb += TG3_BDINFO_SIZE;
8348 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8349 ((tp->rx_ret_ring_mask + 1) <<
8350 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8353 rxrcb += TG3_BDINFO_SIZE;
8357 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8359 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8361 if (!tg3_flag(tp, 5750_PLUS) ||
8362 tg3_flag(tp, 5780_CLASS) ||
8363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8365 tg3_flag(tp, 57765_PLUS))
8366 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8367 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8369 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8371 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8373 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8374 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8376 val = min(nic_rep_thresh, host_rep_thresh);
8377 tw32(RCVBDI_STD_THRESH, val);
8379 if (tg3_flag(tp, 57765_PLUS))
8380 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8382 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8385 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8387 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8389 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8390 tw32(RCVBDI_JUMBO_THRESH, val);
8392 if (tg3_flag(tp, 57765_PLUS))
8393 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
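/* Worked example with hypothetical numbers: if bdcache_maxcnt were 8,
 * rx_std_max_post were 8 and rx_pending were 200, then
 * nic_rep_thresh = min(8 / 2, 8) = 4, host_rep_thresh = max(200 / 8, 1) = 25,
 * and RCVBDI_STD_THRESH would be programmed with min(4, 25) = 4.
 */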
8396 static inline u32 calc_crc(unsigned char *buf, int len)
8404 for (j = 0; j < len; j++) {
8407 for (k = 0; k < 8; k++) {
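/* The calc_crc() body is abbreviated above.  For reference, a standard
 * bit-reflected CRC-32 (polynomial 0xEDB88320) of the same shape is sketched
 * below; this is an editorial illustration, not the driver's verbatim code.
 */
static u32 crc32_reflected_sketch(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320; /* reflected CRC-32 polynomial */
		}
	}
	return ~reg;
}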
8420 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8422 /* accept or reject all multicast frames */
8423 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8424 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8425 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8426 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8429 static void __tg3_set_rx_mode(struct net_device *dev)
8431 struct tg3 *tp = netdev_priv(dev);
8434 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8435 RX_MODE_KEEP_VLAN_TAG);
8437 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8438 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8441 if (!tg3_flag(tp, ENABLE_ASF))
8442 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8445 if (dev->flags & IFF_PROMISC) {
8446 /* Promiscuous mode. */
8447 rx_mode |= RX_MODE_PROMISC;
8448 } else if (dev->flags & IFF_ALLMULTI) {
8449 /* Accept all multicast. */
8450 tg3_set_multi(tp, 1);
8451 } else if (netdev_mc_empty(dev)) {
8452 /* Reject all multicast. */
8453 tg3_set_multi(tp, 0);
8455 /* Accept one or more multicast(s). */
8456 struct netdev_hw_addr *ha;
8457 u32 mc_filter[4] = { 0, };
8462 netdev_for_each_mc_addr(ha, dev) {
8463 crc = calc_crc(ha->addr, ETH_ALEN);
8465 regidx = (bit & 0x60) >> 5;
8467 mc_filter[regidx] |= (1 << bit);
8470 tw32(MAC_HASH_REG_0, mc_filter[0]);
8471 tw32(MAC_HASH_REG_1, mc_filter[1]);
8472 tw32(MAC_HASH_REG_2, mc_filter[2]);
8473 tw32(MAC_HASH_REG_3, mc_filter[3]);
8476 if (rx_mode != tp->rx_mode) {
8477 tp->rx_mode = rx_mode;
8478 tw32_f(MAC_RX_MODE, rx_mode);
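/* Editorial sketch (not driver code): the multicast loop above reduces each
 * address CRC to a 7-bit hash and sets one bit across the four 32-bit
 * MAC_HASH_REG_* values.  Assuming a 7-bit hash value is already in hand,
 * the register/bit split looks like this:
 */
static inline void mc_hash_set_bit_sketch(u32 mc_filter[4], u32 hash7)
{
	u32 regidx = (hash7 & 0x60) >> 5;  /* top two bits select the register */
	u32 bit = hash7 & 0x1f;            /* low five bits select the bit */

	mc_filter[regidx] |= 1u << bit;
}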
8483 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8487 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8488 tp->rss_ind_tbl[i] =
8489 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8492 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8496 if (!tg3_flag(tp, SUPPORT_MSIX))
8499 if (tp->irq_cnt <= 2) {
8500 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8504 /* Validate table against current IRQ count */
8505 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8506 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8510 if (i != TG3_RSS_INDIR_TBL_SIZE)
8511 tg3_rss_init_dflt_indir_tbl(tp);
8514 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8517 u32 reg = MAC_RSS_INDIR_TBL_0;
8519 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8520 u32 val = tp->rss_ind_tbl[i];
8522 for (; i % 8; i++) {
8524 val |= tp->rss_ind_tbl[i];
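/* Editorial sketch (not driver code): the loop above packs consecutive RSS
 * indirection-table entries into one 32-bit register before writing it.
 * Assuming a 4-bit field per entry (so eight entries per register), the
 * packing looks like this; the field width is an assumption here since the
 * shift inside the loop is abbreviated above.
 */
static inline u32 rss_indir_pack_sketch(const u8 *tbl)
{
	u32 val = tbl[0];
	int n;

	for (n = 1; n < 8; n++) {
		val <<= 4;      /* assumed 4-bit entry width */
		val |= tbl[n];
	}
	return val;
}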
8531 /* tp->lock is held. */
8532 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8534 u32 val, rdmac_mode;
8536 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8538 tg3_disable_ints(tp);
8542 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8544 if (tg3_flag(tp, INIT_COMPLETE))
8545 tg3_abort_hw(tp, 1);
8547 /* Enable MAC control of LPI */
8548 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8549 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8550 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8551 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8553 tw32_f(TG3_CPMU_EEE_CTRL,
8554 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8556 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8557 TG3_CPMU_EEEMD_LPI_IN_TX |
8558 TG3_CPMU_EEEMD_LPI_IN_RX |
8559 TG3_CPMU_EEEMD_EEE_ENABLE;
8561 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8562 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8564 if (tg3_flag(tp, ENABLE_APE))
8565 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8567 tw32_f(TG3_CPMU_EEE_MODE, val);
8569 tw32_f(TG3_CPMU_EEE_DBTMR1,
8570 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8571 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8573 tw32_f(TG3_CPMU_EEE_DBTMR2,
8574 TG3_CPMU_DBTMR2_APE_TX_2047US |
8575 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8581 err = tg3_chip_reset(tp);
8585 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8587 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8588 val = tr32(TG3_CPMU_CTRL);
8589 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8590 tw32(TG3_CPMU_CTRL, val);
8592 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8593 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8594 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8595 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8597 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8598 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8599 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8600 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8602 val = tr32(TG3_CPMU_HST_ACC);
8603 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8604 val |= CPMU_HST_ACC_MACCLK_6_25;
8605 tw32(TG3_CPMU_HST_ACC, val);
8608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8609 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8610 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8611 PCIE_PWR_MGMT_L1_THRESH_4MS;
8612 tw32(PCIE_PWR_MGMT_THRESH, val);
8614 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8615 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8617 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8619 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8620 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8623 if (tg3_flag(tp, L1PLLPD_EN)) {
8624 u32 grc_mode = tr32(GRC_MODE);
8626 /* Access the lower 1K of PL PCIE block registers. */
8627 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8628 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8630 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8631 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8632 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8634 tw32(GRC_MODE, grc_mode);
8637 if (tg3_flag(tp, 57765_CLASS)) {
8638 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8639 u32 grc_mode = tr32(GRC_MODE);
8641 /* Access the lower 1K of PL PCIE block registers. */
8642 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8643 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8645 val = tr32(TG3_PCIE_TLDLPL_PORT +
8646 TG3_PCIE_PL_LO_PHYCTL5);
8647 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8648 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8650 tw32(GRC_MODE, grc_mode);
8653 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8654 u32 grc_mode = tr32(GRC_MODE);
8656 /* Access the lower 1K of DL PCIE block registers. */
8657 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8658 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8660 val = tr32(TG3_PCIE_TLDLPL_PORT +
8661 TG3_PCIE_DL_LO_FTSMAX);
8662 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8663 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8664 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8666 tw32(GRC_MODE, grc_mode);
8669 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8670 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8671 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8672 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8675 /* This works around an issue with Athlon chipsets on
8676 * B3 tigon3 silicon. This bit has no effect on any
8677 * other revision. But do not set this on PCI Express
8678 * chips and don't even touch the clocks if the CPMU is present.
8680 if (!tg3_flag(tp, CPMU_PRESENT)) {
8681 if (!tg3_flag(tp, PCI_EXPRESS))
8682 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8683 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8686 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8687 tg3_flag(tp, PCIX_MODE)) {
8688 val = tr32(TG3PCI_PCISTATE);
8689 val |= PCISTATE_RETRY_SAME_DMA;
8690 tw32(TG3PCI_PCISTATE, val);
8693 if (tg3_flag(tp, ENABLE_APE)) {
8694 /* Allow reads and writes to the
8695 * APE register and memory space.
8697 val = tr32(TG3PCI_PCISTATE);
8698 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8699 PCISTATE_ALLOW_APE_SHMEM_WR |
8700 PCISTATE_ALLOW_APE_PSPACE_WR;
8701 tw32(TG3PCI_PCISTATE, val);
8704 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8705 /* Enable some hw fixes. */
8706 val = tr32(TG3PCI_MSI_DATA);
8707 val |= (1 << 26) | (1 << 28) | (1 << 29);
8708 tw32(TG3PCI_MSI_DATA, val);
8711 /* Descriptor ring init may make accesses to the
8712 * NIC SRAM area to setup the TX descriptors, so we
8713 * can only do this after the hardware has been
8714 * successfully reset.
8716 err = tg3_init_rings(tp);
8720 if (tg3_flag(tp, 57765_PLUS)) {
8721 val = tr32(TG3PCI_DMA_RW_CTRL) &
8722 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8723 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8724 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8725 if (!tg3_flag(tp, 57765_CLASS) &&
8726 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8727 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8728 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8729 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8730 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8731 /* This value is determined during the probe time DMA
8732 * engine test, tg3_test_dma.
8734 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8737 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8738 GRC_MODE_4X_NIC_SEND_RINGS |
8739 GRC_MODE_NO_TX_PHDR_CSUM |
8740 GRC_MODE_NO_RX_PHDR_CSUM);
8741 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8743 /* Pseudo-header checksum is done by hardware logic and not
8744 * the offload processors, so make the chip do the pseudo-
8745 * header checksums on receive. For transmit it is more
8746 * convenient to do the pseudo-header checksum in software
8747 * as Linux does that on transmit for us in all cases.
8749 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
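/* For reference (editorial note): the pseudo-header checksum mentioned above
 * is the ones-complement 16-bit sum over the source IP address, destination
 * IP address, a zero byte, the protocol number and the TCP/UDP length,
 * folded into the transport checksum field.
 */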
8753 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8755 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8756 val = tr32(GRC_MISC_CFG);
8758 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8759 tw32(GRC_MISC_CFG, val);
8761 /* Initialize MBUF/DESC pool. */
8762 if (tg3_flag(tp, 5750_PLUS)) {
8764 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8765 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8767 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8769 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8770 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8771 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8772 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8775 fw_len = tp->fw_len;
8776 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8777 tw32(BUFMGR_MB_POOL_ADDR,
8778 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8779 tw32(BUFMGR_MB_POOL_SIZE,
8780 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8783 if (tp->dev->mtu <= ETH_DATA_LEN) {
8784 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8785 tp->bufmgr_config.mbuf_read_dma_low_water);
8786 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8787 tp->bufmgr_config.mbuf_mac_rx_low_water);
8788 tw32(BUFMGR_MB_HIGH_WATER,
8789 tp->bufmgr_config.mbuf_high_water);
8791 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8792 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8793 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8794 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8795 tw32(BUFMGR_MB_HIGH_WATER,
8796 tp->bufmgr_config.mbuf_high_water_jumbo);
8798 tw32(BUFMGR_DMA_LOW_WATER,
8799 tp->bufmgr_config.dma_low_water);
8800 tw32(BUFMGR_DMA_HIGH_WATER,
8801 tp->bufmgr_config.dma_high_water);
8803 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8804 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8805 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8807 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8808 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8809 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8810 tw32(BUFMGR_MODE, val);
8811 for (i = 0; i < 2000; i++) {
8812 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8817 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8821 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8822 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8824 tg3_setup_rxbd_thresholds(tp);
8826 /* Initialize TG3_BDINFO's at:
8827 * RCVDBDI_STD_BD: standard eth size rx ring
8828 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8829 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8832 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8833 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8834 * ring attribute flags
8835 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8837 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8838 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8840 * The size of each ring is fixed in the firmware, but the location is
8843 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8844 ((u64) tpr->rx_std_mapping >> 32));
8845 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8846 ((u64) tpr->rx_std_mapping & 0xffffffff));
8847 if (!tg3_flag(tp, 5717_PLUS))
8848 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8849 NIC_SRAM_RX_BUFFER_DESC);
8851 /* Disable the mini ring */
8852 if (!tg3_flag(tp, 5705_PLUS))
8853 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8854 BDINFO_FLAGS_DISABLED);
8856 /* Program the jumbo buffer descriptor ring control
8857 * blocks on those devices that have them.
8859 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8860 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8862 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8863 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8864 ((u64) tpr->rx_jmb_mapping >> 32));
8865 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8866 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8867 val = TG3_RX_JMB_RING_SIZE(tp) <<
8868 BDINFO_FLAGS_MAXLEN_SHIFT;
8869 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8870 val | BDINFO_FLAGS_USE_EXT_RECV);
8871 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8872 tg3_flag(tp, 57765_CLASS))
8873 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8874 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8876 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8877 BDINFO_FLAGS_DISABLED);
8880 if (tg3_flag(tp, 57765_PLUS)) {
8881 val = TG3_RX_STD_RING_SIZE(tp);
8882 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8883 val |= (TG3_RX_STD_DMA_SZ << 2);
8885 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8887 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8889 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8891 tpr->rx_std_prod_idx = tp->rx_pending;
8892 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8894 tpr->rx_jmb_prod_idx =
8895 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8896 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8898 tg3_rings_reset(tp);
8900 /* Initialize MAC address and backoff seed. */
8901 __tg3_set_mac_addr(tp, 0);
8903 /* MTU + ethernet header + FCS + optional VLAN tag */
8904 tw32(MAC_RX_MTU_SIZE,
8905 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
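/* Worked example: with the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */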
8907 /* The slot time is changed by tg3_setup_phy if we
8908 * run at gigabit with half duplex.
8910 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8911 (6 << TX_LENGTHS_IPG_SHIFT) |
8912 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8915 val |= tr32(MAC_TX_LENGTHS) &
8916 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8917 TX_LENGTHS_CNT_DWN_VAL_MSK);
8919 tw32(MAC_TX_LENGTHS, val);
8921 /* Receive rules. */
8922 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8923 tw32(RCVLPC_CONFIG, 0x0181);
8925 /* Calculate RDMAC_MODE setting early, we need it to determine
8926 * the RCVLPC_STATE_ENABLE mask.
8928 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8929 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8930 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8931 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8932 RDMAC_MODE_LNGREAD_ENAB);
8934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8935 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8940 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8941 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8942 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8945 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8946 if (tg3_flag(tp, TSO_CAPABLE) &&
8947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8948 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8949 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8950 !tg3_flag(tp, IS_5788)) {
8951 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8955 if (tg3_flag(tp, PCI_EXPRESS))
8956 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8958 if (tg3_flag(tp, HW_TSO_1) ||
8959 tg3_flag(tp, HW_TSO_2) ||
8960 tg3_flag(tp, HW_TSO_3))
8961 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8963 if (tg3_flag(tp, 57765_PLUS) ||
8964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8966 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8969 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8975 tg3_flag(tp, 57765_PLUS)) {
8976 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8979 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8980 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8981 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8982 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8983 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8984 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8986 tw32(TG3_RDMA_RSRVCTRL_REG,
8987 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8992 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8993 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8994 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8995 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8998 /* Receive/send statistics. */
8999 if (tg3_flag(tp, 5750_PLUS)) {
9000 val = tr32(RCVLPC_STATS_ENABLE);
9001 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9002 tw32(RCVLPC_STATS_ENABLE, val);
9003 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9004 tg3_flag(tp, TSO_CAPABLE)) {
9005 val = tr32(RCVLPC_STATS_ENABLE);
9006 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9007 tw32(RCVLPC_STATS_ENABLE, val);
9009 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9011 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9012 tw32(SNDDATAI_STATSENAB, 0xffffff);
9013 tw32(SNDDATAI_STATSCTRL,
9014 (SNDDATAI_SCTRL_ENABLE |
9015 SNDDATAI_SCTRL_FASTUPD));
9017 /* Setup host coalescing engine. */
9018 tw32(HOSTCC_MODE, 0);
9019 for (i = 0; i < 2000; i++) {
9020 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9025 __tg3_set_coalesce(tp, &tp->coal);
9027 if (!tg3_flag(tp, 5705_PLUS)) {
9028 /* Status/statistics block address. See tg3_timer,
9029 * the tg3_periodic_fetch_stats call there, and
9030 * tg3_get_stats to see how this works for 5705/5750 chips.
9032 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9033 ((u64) tp->stats_mapping >> 32));
9034 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9035 ((u64) tp->stats_mapping & 0xffffffff));
9036 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9038 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9040 /* Clear statistics and status block memory areas */
9041 for (i = NIC_SRAM_STATS_BLK;
9042 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9044 tg3_write_mem(tp, i, 0);
9049 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9051 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9052 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9053 if (!tg3_flag(tp, 5705_PLUS))
9054 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9056 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9058 /* reset to prevent losing 1st rx packet intermittently */
9059 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9063 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9064 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9065 MAC_MODE_FHDE_ENABLE;
9066 if (tg3_flag(tp, ENABLE_APE))
9067 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9068 if (!tg3_flag(tp, 5705_PLUS) &&
9069 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9071 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9072 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9075 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9076 * If TG3_FLAG_IS_NIC is zero, we should read the
9077 * register to preserve the GPIO settings for LOMs. The GPIOs,
9078 * whether used as inputs or outputs, are set by boot code after
9081 if (!tg3_flag(tp, IS_NIC)) {
9084 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9085 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9086 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9089 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9090 GRC_LCLCTRL_GPIO_OUTPUT3;
9092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9093 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9095 tp->grc_local_ctrl &= ~gpio_mask;
9096 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9098 /* GPIO1 must be driven high for eeprom write protect */
9099 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9100 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9101 GRC_LCLCTRL_GPIO_OUTPUT1);
9103 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9106 if (tg3_flag(tp, USING_MSIX)) {
9107 val = tr32(MSGINT_MODE);
9108 val |= MSGINT_MODE_ENABLE;
9109 if (tp->irq_cnt > 1)
9110 val |= MSGINT_MODE_MULTIVEC_EN;
9111 if (!tg3_flag(tp, 1SHOT_MSI))
9112 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9113 tw32(MSGINT_MODE, val);
9116 if (!tg3_flag(tp, 5705_PLUS)) {
9117 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9121 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9122 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9123 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9124 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9125 WDMAC_MODE_LNGREAD_ENAB);
9127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9128 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9129 if (tg3_flag(tp, TSO_CAPABLE) &&
9130 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9131 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9133 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9134 !tg3_flag(tp, IS_5788)) {
9135 val |= WDMAC_MODE_RX_ACCEL;
9139 /* Enable host coalescing bug fix */
9140 if (tg3_flag(tp, 5755_PLUS))
9141 val |= WDMAC_MODE_STATUS_TAG_FIX;
9143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9144 val |= WDMAC_MODE_BURST_ALL_DATA;
9146 tw32_f(WDMAC_MODE, val);
9149 if (tg3_flag(tp, PCIX_MODE)) {
9152 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9155 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9156 pcix_cmd |= PCI_X_CMD_READ_2K;
9157 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9158 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9159 pcix_cmd |= PCI_X_CMD_READ_2K;
9161 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9165 tw32_f(RDMAC_MODE, rdmac_mode);
9168 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9169 if (!tg3_flag(tp, 5705_PLUS))
9170 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9174 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9176 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9178 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9179 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9180 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9181 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9182 val |= RCVDBDI_MODE_LRG_RING_SZ;
9183 tw32(RCVDBDI_MODE, val);
9184 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9185 if (tg3_flag(tp, HW_TSO_1) ||
9186 tg3_flag(tp, HW_TSO_2) ||
9187 tg3_flag(tp, HW_TSO_3))
9188 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9189 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9190 if (tg3_flag(tp, ENABLE_TSS))
9191 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9192 tw32(SNDBDI_MODE, val);
9193 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9195 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9196 err = tg3_load_5701_a0_firmware_fix(tp);
9201 if (tg3_flag(tp, TSO_CAPABLE)) {
9202 err = tg3_load_tso_firmware(tp);
9207 tp->tx_mode = TX_MODE_ENABLE;
9209 if (tg3_flag(tp, 5755_PLUS) ||
9210 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9211 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9214 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9215 tp->tx_mode &= ~val;
9216 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9219 tw32_f(MAC_TX_MODE, tp->tx_mode);
9222 if (tg3_flag(tp, ENABLE_RSS)) {
9223 tg3_rss_write_indir_tbl(tp);
9225 /* Setup the "secret" hash key. */
9226 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9227 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9228 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9229 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9230 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9231 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9232 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9233 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9234 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9235 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9238 tp->rx_mode = RX_MODE_ENABLE;
9239 if (tg3_flag(tp, 5755_PLUS))
9240 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9242 if (tg3_flag(tp, ENABLE_RSS))
9243 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9244 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9245 RX_MODE_RSS_IPV6_HASH_EN |
9246 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9247 RX_MODE_RSS_IPV4_HASH_EN |
9248 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9250 tw32_f(MAC_RX_MODE, tp->rx_mode);
9253 tw32(MAC_LED_CTRL, tp->led_ctrl);
9255 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9256 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9257 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9260 tw32_f(MAC_RX_MODE, tp->rx_mode);
9263 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9264 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9265 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9266 /* Set drive transmission level to 1.2V */
9267 /* only if the signal pre-emphasis bit is not set */
9268 val = tr32(MAC_SERDES_CFG);
9271 tw32(MAC_SERDES_CFG, val);
9273 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9274 tw32(MAC_SERDES_CFG, 0x616000);
9277 /* Prevent chip from dropping frames when flow control
9278 * is enabled.
9279 */
9280 if (tg3_flag(tp, 57765_CLASS))
9284 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9287 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9288 /* Use hardware link auto-negotiation */
9289 tg3_flag_set(tp, HW_AUTONEG);
9292 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9296 tmp = tr32(SERDES_RX_CTRL);
9297 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9298 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9299 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9300 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9303 if (!tg3_flag(tp, USE_PHYLIB)) {
9304 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9305 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9307 err = tg3_setup_phy(tp, 0);
9311 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9312 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9315 /* Clear CRC stats. */
9316 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9317 tg3_writephy(tp, MII_TG3_TEST1,
9318 tmp | MII_TG3_TEST1_CRC_EN);
9319 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9324 __tg3_set_rx_mode(tp->dev);
9326 /* Initialize receive rules. */
9327 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9328 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9329 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9330 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9332 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9336 if (tg3_flag(tp, ENABLE_ASF))
9340 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9342 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9344 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9346 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9348 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9350 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9352 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9354 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9356 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9358 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9360 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9362 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9364 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9366 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9374 if (tg3_flag(tp, ENABLE_APE))
9375 /* Write our heartbeat update interval to APE. */
9376 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9377 APE_HOST_HEARTBEAT_INT_DISABLE);
9379 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9384 /* Called at device open time to get the chip ready for
9385 * packet processing. Invoked with tp->lock held.
9387 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9389 tg3_switch_clocks(tp);
9391 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9393 return tg3_reset_hw(tp, reset_phy);
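/* Statistics registers are 32 bits wide; TG3_STAT_ADD32() below accumulates
 * one into a 64-bit {high, low} software counter, using the unsigned wrap
 * test (low < __val after the addition) to carry a low-word overflow into
 * the high word.
 */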
9396 #define TG3_STAT_ADD32(PSTAT, REG) \
9397 do { u32 __val = tr32(REG); \
9398 (PSTAT)->low += __val; \
9399 if ((PSTAT)->low < __val) \
9400 (PSTAT)->high += 1; \
9401 } while (0)
9403 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9405 struct tg3_hw_stats *sp = tp->hw_stats;
9407 if (!netif_carrier_ok(tp->dev))
9410 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9411 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9412 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9413 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9414 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9415 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9416 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9417 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9418 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9419 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9420 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9421 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9422 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9424 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9425 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9426 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9427 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9428 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9429 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9430 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9431 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9432 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9433 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9434 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9435 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9436 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9437 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9439 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
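/* The 5717 and the A0 steppings of the 5719/5720 do not use the RCVLPC
 * discard counter; instead the host-coalescing mbuf low-watermark attention
 * bit is sampled and cleared, so at most one discard event is counted per
 * poll, and the same running total is mirrored into mbuf_lwm_thresh_hit.
 */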
9440 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9441 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9442 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9443 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9445 u32 val = tr32(HOSTCC_FLOW_ATTN);
9446 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9448 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9449 sp->rx_discards.low += val;
9450 if (sp->rx_discards.low < val)
9451 sp->rx_discards.high += 1;
9453 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9455 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
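/* Called from the driver timer on chips that can lose an MSI: a vector that
 * still reports pending work while its rx/tx consumer indices have not
 * advanced since the previous tick is counted via chk_msi_cnt; otherwise the
 * index snapshots are simply refreshed.
 */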
9458 static void tg3_chk_missed_msi(struct tg3 *tp)
9462 for (i = 0; i < tp->irq_cnt; i++) {
9463 struct tg3_napi *tnapi = &tp->napi[i];
9465 if (tg3_has_work(tnapi)) {
9466 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9467 tnapi->last_tx_cons == tnapi->tx_cons) {
9468 if (tnapi->chk_msi_cnt < 1) {
9469 tnapi->chk_msi_cnt++;
9475 tnapi->chk_msi_cnt = 0;
9476 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9477 tnapi->last_tx_cons = tnapi->tx_cons;
9481 static void tg3_timer(unsigned long __opaque)
9483 struct tg3 *tp = (struct tg3 *) __opaque;
9485 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9488 spin_lock(&tp->lock);
9490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9491 tg3_flag(tp, 57765_CLASS))
9492 tg3_chk_missed_msi(tp);
9494 if (!tg3_flag(tp, TAGGED_STATUS)) {
9495 /* All of this garbage is because, when using non-tagged
9496 * IRQ status, the mailbox/status_block protocol the chip
9497 * uses with the cpu is race-prone.
9498 */
9499 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9500 tw32(GRC_LOCAL_CTRL,
9501 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9503 tw32(HOSTCC_MODE, tp->coalesce_mode |
9504 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9507 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9508 spin_unlock(&tp->lock);
9509 tg3_reset_task_schedule(tp);
9514 /* This part only runs once per second. */
9515 if (!--tp->timer_counter) {
9516 if (tg3_flag(tp, 5705_PLUS))
9517 tg3_periodic_fetch_stats(tp);
9519 if (tp->setlpicnt && !--tp->setlpicnt)
9520 tg3_phy_eee_enable(tp);
9522 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9526 mac_stat = tr32(MAC_STATUS);
9529 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9530 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9532 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9536 tg3_setup_phy(tp, 0);
9537 } else if (tg3_flag(tp, POLL_SERDES)) {
9538 u32 mac_stat = tr32(MAC_STATUS);
9541 if (netif_carrier_ok(tp->dev) &&
9542 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9545 if (!netif_carrier_ok(tp->dev) &&
9546 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9547 MAC_STATUS_SIGNAL_DET))) {
9551 if (!tp->serdes_counter) {
9554 ~MAC_MODE_PORT_MODE_MASK));
9556 tw32_f(MAC_MODE, tp->mac_mode);
9559 tg3_setup_phy(tp, 0);
9561 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9562 tg3_flag(tp, 5780_CLASS)) {
9563 tg3_serdes_parallel_detect(tp);
9566 tp->timer_counter = tp->timer_multiplier;
9569 /* Heartbeat is only sent once every 2 seconds.
9571 * The heartbeat is to tell the ASF firmware that the host
9572 * driver is still alive. In the event that the OS crashes,
9573 * ASF needs to reset the hardware to free up the FIFO space
9574 * that may be filled with rx packets destined for the host.
9575 * If the FIFO is full, ASF will no longer function properly.
9577 * Unintended resets have been reported on real time kernels
9578 * where the timer doesn't run on time. Netpoll will also have
9579 * the same problem.
9580 *
9581 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9582 * to check the ring condition when the heartbeat is expiring
9583 * before doing the reset. This will prevent most unintended
9584 * resets.
9585 */
9586 if (!--tp->asf_counter) {
9587 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9588 tg3_wait_for_event_ack(tp);
9590 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9591 FWCMD_NICDRV_ALIVE3);
9592 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9593 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9594 TG3_FW_UPDATE_TIMEOUT_SEC);
9596 tg3_generate_fw_event(tp);
9598 tp->asf_counter = tp->asf_multiplier;
9601 spin_unlock(&tp->lock);
9604 tp->timer.expires = jiffies + tp->timer_offset;
9605 add_timer(&tp->timer);
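/* The driver timer normally fires every 100 ms; chips with tagged status
 * (other than the 5717 and 57765-class parts, which presumably need the more
 * frequent missed-MSI check above) can run it once per second instead.
 * timer_multiplier converts ticks into the once-per-second work inside
 * tg3_timer(), and asf_multiplier into the ASF heartbeat period of
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */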
9608 static void __devinit tg3_timer_init(struct tg3 *tp)
9610 if (tg3_flag(tp, TAGGED_STATUS) &&
9611 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9612 !tg3_flag(tp, 57765_CLASS))
9613 tp->timer_offset = HZ;
9615 tp->timer_offset = HZ / 10;
9617 BUG_ON(tp->timer_offset > HZ);
9619 tp->timer_multiplier = (HZ / tp->timer_offset);
9620 tp->asf_multiplier = (HZ / tp->timer_offset) *
9621 TG3_FW_UPDATE_FREQ_SEC;
9623 init_timer(&tp->timer);
9624 tp->timer.data = (unsigned long) tp;
9625 tp->timer.function = tg3_timer;
9628 static void tg3_timer_start(struct tg3 *tp)
9630 tp->asf_counter = tp->asf_multiplier;
9631 tp->timer_counter = tp->timer_multiplier;
9633 tp->timer.expires = jiffies + tp->timer_offset;
9634 add_timer(&tp->timer);
9637 static void tg3_timer_stop(struct tg3 *tp)
9639 del_timer_sync(&tp->timer);
9642 /* Restart hardware after configuration changes, self-test, etc.
9643 * Invoked with tp->lock held.
9645 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9646 __releases(tp->lock)
9647 __acquires(tp->lock)
9651 err = tg3_init_hw(tp, reset_phy);
9654 "Failed to re-initialize device, aborting\n");
9655 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9656 tg3_full_unlock(tp);
9659 tg3_napi_enable(tp);
9661 tg3_full_lock(tp, 0);
9666 static void tg3_reset_task(struct work_struct *work)
9668 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9671 tg3_full_lock(tp, 0);
9673 if (!netif_running(tp->dev)) {
9674 tg3_flag_clear(tp, RESET_TASK_PENDING);
9675 tg3_full_unlock(tp);
9679 tg3_full_unlock(tp);
9685 tg3_full_lock(tp, 1);
9687 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9688 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9689 tp->write32_rx_mbox = tg3_write_flush_reg32;
9690 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9695 err = tg3_init_hw(tp, 1);
9699 tg3_netif_start(tp);
9702 tg3_full_unlock(tp);
9707 tg3_flag_clear(tp, RESET_TASK_PENDING);
9710 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9713 unsigned long flags;
9715 struct tg3_napi *tnapi = &tp->napi[irq_num];
9717 if (tp->irq_cnt == 1)
9718 name = tp->dev->name;
9720 name = &tnapi->irq_lbl[0];
9721 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9722 name[IFNAMSIZ-1] = 0;
9725 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9727 if (tg3_flag(tp, 1SHOT_MSI))
9732 if (tg3_flag(tp, TAGGED_STATUS))
9733 fn = tg3_interrupt_tagged;
9734 flags = IRQF_SHARED;
9737 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9740 static int tg3_test_interrupt(struct tg3 *tp)
9742 struct tg3_napi *tnapi = &tp->napi[0];
9743 struct net_device *dev = tp->dev;
9744 int err, i, intr_ok = 0;
9747 if (!netif_running(dev))
9750 tg3_disable_ints(tp);
9752 free_irq(tnapi->irq_vec, tnapi);
9755 * Turn off MSI one shot mode. Otherwise this test has no
9756 * observable way to know whether the interrupt was delivered.
9758 if (tg3_flag(tp, 57765_PLUS)) {
9759 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9760 tw32(MSGINT_MODE, val);
9763 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9764 IRQF_SHARED, dev->name, tnapi);
9768 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9769 tg3_enable_ints(tp);
9771 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9774 for (i = 0; i < 5; i++) {
9775 u32 int_mbox, misc_host_ctrl;
9777 int_mbox = tr32_mailbox(tnapi->int_mbox);
9778 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9780 if ((int_mbox != 0) ||
9781 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9786 if (tg3_flag(tp, 57765_PLUS) &&
9787 tnapi->hw_status->status_tag != tnapi->last_tag)
9788 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9793 tg3_disable_ints(tp);
9795 free_irq(tnapi->irq_vec, tnapi);
9797 err = tg3_request_irq(tp, 0);
9803 /* Reenable MSI one shot mode. */
9804 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9805 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9806 tw32(MSGINT_MODE, val);
9814 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9815 * successfully restored
9817 static int tg3_test_msi(struct tg3 *tp)
9822 if (!tg3_flag(tp, USING_MSI))
9825 /* Turn off SERR reporting in case MSI terminates with Master
9826 * Abort.
9827 */
9828 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9829 pci_write_config_word(tp->pdev, PCI_COMMAND,
9830 pci_cmd & ~PCI_COMMAND_SERR);
9832 err = tg3_test_interrupt(tp);
9834 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9839 /* other failures */
9843 /* MSI test failed, go back to INTx mode */
9844 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9845 "to INTx mode. Please report this failure to the PCI "
9846 "maintainer and include system chipset information\n");
9848 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9850 pci_disable_msi(tp->pdev);
9852 tg3_flag_clear(tp, USING_MSI);
9853 tp->napi[0].irq_vec = tp->pdev->irq;
9855 err = tg3_request_irq(tp, 0);
9859 /* Need to reset the chip because the MSI cycle may have terminated
9860 * with Master Abort.
9862 tg3_full_lock(tp, 1);
9864 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9865 err = tg3_init_hw(tp, 1);
9867 tg3_full_unlock(tp);
9870 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9875 static int tg3_request_firmware(struct tg3 *tp)
9877 const __be32 *fw_data;
9879 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9880 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9885 fw_data = (void *)tp->fw->data;
9887 /* Firmware blob starts with version numbers, followed by
9888 * start address and _full_ length including BSS sections
9889 * (which must be longer than the actual data, of course).
9890 */
9892 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
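/* The first three 32-bit words of the blob are the header (version, start
 * address and the length just read), hence the 12-byte allowance in the
 * sanity check below: the recorded length must cover at least the payload
 * that follows the header.
 */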
9893 if (tp->fw_len < (tp->fw->size - 12)) {
9894 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9895 tp->fw_len, tp->fw_needed);
9896 release_firmware(tp->fw);
9901 /* We no longer need firmware; we have it. */
9902 tp->fw_needed = NULL;
9906 static bool tg3_enable_msix(struct tg3 *tp)
9909 struct msix_entry msix_ent[tp->irq_max];
9911 tp->irq_cnt = num_online_cpus();
9912 if (tp->irq_cnt > 1) {
9913 /* We want as many rx rings enabled as there are cpus.
9914 * In multiqueue MSI-X mode, the first MSI-X vector
9915 * only deals with link interrupts, etc, so we add
9916 * one to the number of vectors we are requesting.
9918 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9921 for (i = 0; i < tp->irq_max; i++) {
9922 msix_ent[i].entry = i;
9923 msix_ent[i].vector = 0;
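/* With the legacy pci_enable_msix() interface a positive return value
 * reports how many vectors could actually be granted, so the allocation is
 * retried below with that smaller count.
 */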
9926 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9929 } else if (rc != 0) {
9930 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9932 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9937 for (i = 0; i < tp->irq_max; i++)
9938 tp->napi[i].irq_vec = msix_ent[i].vector;
9940 netif_set_real_num_tx_queues(tp->dev, 1);
9941 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9942 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9943 pci_disable_msix(tp->pdev);
9947 if (tp->irq_cnt > 1) {
9948 tg3_flag_set(tp, ENABLE_RSS);
9950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9952 tg3_flag_set(tp, ENABLE_TSS);
9953 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9960 static void tg3_ints_init(struct tg3 *tp)
9962 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9963 !tg3_flag(tp, TAGGED_STATUS)) {
9964 /* All MSI supporting chips should support tagged
9965 * status. Assert that this is the case.
9967 netdev_warn(tp->dev,
9968 "MSI without TAGGED_STATUS? Not using MSI\n");
9972 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9973 tg3_flag_set(tp, USING_MSIX);
9974 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9975 tg3_flag_set(tp, USING_MSI);
9977 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9978 u32 msi_mode = tr32(MSGINT_MODE);
9979 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9980 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9981 if (!tg3_flag(tp, 1SHOT_MSI))
9982 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9983 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9986 if (!tg3_flag(tp, USING_MSIX)) {
9988 tp->napi[0].irq_vec = tp->pdev->irq;
9989 netif_set_real_num_tx_queues(tp->dev, 1);
9990 netif_set_real_num_rx_queues(tp->dev, 1);
9994 static void tg3_ints_fini(struct tg3 *tp)
9996 if (tg3_flag(tp, USING_MSIX))
9997 pci_disable_msix(tp->pdev);
9998 else if (tg3_flag(tp, USING_MSI))
9999 pci_disable_msi(tp->pdev);
10000 tg3_flag_clear(tp, USING_MSI);
10001 tg3_flag_clear(tp, USING_MSIX);
10002 tg3_flag_clear(tp, ENABLE_RSS);
10003 tg3_flag_clear(tp, ENABLE_TSS);
10006 static int tg3_open(struct net_device *dev)
10008 struct tg3 *tp = netdev_priv(dev);
10011 if (tp->fw_needed) {
10012 err = tg3_request_firmware(tp);
10013 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10017 netdev_warn(tp->dev, "TSO capability disabled\n");
10018 tg3_flag_clear(tp, TSO_CAPABLE);
10019 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10020 netdev_notice(tp->dev, "TSO capability restored\n");
10021 tg3_flag_set(tp, TSO_CAPABLE);
10025 netif_carrier_off(tp->dev);
10027 err = tg3_power_up(tp);
10031 tg3_full_lock(tp, 0);
10033 tg3_disable_ints(tp);
10034 tg3_flag_clear(tp, INIT_COMPLETE);
10036 tg3_full_unlock(tp);
10039 * Setup interrupts first so we know how
10040 * many NAPI resources to allocate
10044 tg3_rss_check_indir_tbl(tp);
10046 /* The placement of this call is tied
10047 * to the setup and use of Host TX descriptors.
10049 err = tg3_alloc_consistent(tp);
10055 tg3_napi_enable(tp);
10057 for (i = 0; i < tp->irq_cnt; i++) {
10058 struct tg3_napi *tnapi = &tp->napi[i];
10059 err = tg3_request_irq(tp, i);
10061 for (i--; i >= 0; i--) {
10062 tnapi = &tp->napi[i];
10063 free_irq(tnapi->irq_vec, tnapi);
10069 tg3_full_lock(tp, 0);
10071 err = tg3_init_hw(tp, 1);
10073 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10074 tg3_free_rings(tp);
10077 tg3_full_unlock(tp);
10082 if (tg3_flag(tp, USING_MSI)) {
10083 err = tg3_test_msi(tp);
10086 tg3_full_lock(tp, 0);
10087 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10088 tg3_free_rings(tp);
10089 tg3_full_unlock(tp);
10094 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10095 u32 val = tr32(PCIE_TRANSACTION_CFG);
10097 tw32(PCIE_TRANSACTION_CFG,
10098 val | PCIE_TRANS_CFG_1SHOT_MSI);
10104 tg3_full_lock(tp, 0);
10106 tg3_timer_start(tp);
10107 tg3_flag_set(tp, INIT_COMPLETE);
10108 tg3_enable_ints(tp);
10110 tg3_full_unlock(tp);
10112 netif_tx_start_all_queues(dev);
10114 /*
10115 * Reset the loopback feature if it was turned on while the device was down,
10116 * to make sure it is configured properly now.
10117 */
10118 if (dev->features & NETIF_F_LOOPBACK)
10119 tg3_set_loopback(dev, dev->features);
10124 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10125 struct tg3_napi *tnapi = &tp->napi[i];
10126 free_irq(tnapi->irq_vec, tnapi);
10130 tg3_napi_disable(tp);
10132 tg3_free_consistent(tp);
10136 tg3_frob_aux_power(tp, false);
10137 pci_set_power_state(tp->pdev, PCI_D3hot);
10141 static int tg3_close(struct net_device *dev)
10144 struct tg3 *tp = netdev_priv(dev);
10146 tg3_napi_disable(tp);
10147 tg3_reset_task_cancel(tp);
10149 netif_tx_stop_all_queues(dev);
10151 tg3_timer_stop(tp);
10155 tg3_full_lock(tp, 1);
10157 tg3_disable_ints(tp);
10159 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10160 tg3_free_rings(tp);
10161 tg3_flag_clear(tp, INIT_COMPLETE);
10163 tg3_full_unlock(tp);
10165 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10166 struct tg3_napi *tnapi = &tp->napi[i];
10167 free_irq(tnapi->irq_vec, tnapi);
10172 /* Clear stats across close / open calls */
10173 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10174 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10178 tg3_free_consistent(tp);
10180 tg3_power_down(tp);
10182 netif_carrier_off(tp->dev);
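/* Hardware statistics are kept as {high, low} pairs of 32-bit words
 * (tg3_stat64_t); get_stat64() folds such a pair into a plain u64.
 */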
10187 static inline u64 get_stat64(tg3_stat64_t *val)
10189 return ((u64)val->high << 32) | ((u64)val->low);
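/* On 5700/5701 with a copper PHY the CRC error count lives in the PHY:
 * MII_TG3_TEST1 enables the CRC counter and the RXR counter register is
 * accumulated into phy_crc_errors. All other devices report the MAC's FCS
 * error statistic instead.
 */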
10192 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10194 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10196 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10197 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10201 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10202 tg3_writephy(tp, MII_TG3_TEST1,
10203 val | MII_TG3_TEST1_CRC_EN);
10204 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10208 tp->phy_crc_errors += val;
10210 return tp->phy_crc_errors;
10213 return get_stat64(&hw_stats->rx_fcs_errors);
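/* Report each ethtool statistic as the previously saved baseline
 * (estats_prev) plus the current hardware counter.
 */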
10216 #define ESTAT_ADD(member) \
10217 estats->member = old_estats->member + \
10218 get_stat64(&hw_stats->member)
10220 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10222 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10223 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10225 ESTAT_ADD(rx_octets);
10226 ESTAT_ADD(rx_fragments);
10227 ESTAT_ADD(rx_ucast_packets);
10228 ESTAT_ADD(rx_mcast_packets);
10229 ESTAT_ADD(rx_bcast_packets);
10230 ESTAT_ADD(rx_fcs_errors);
10231 ESTAT_ADD(rx_align_errors);
10232 ESTAT_ADD(rx_xon_pause_rcvd);
10233 ESTAT_ADD(rx_xoff_pause_rcvd);
10234 ESTAT_ADD(rx_mac_ctrl_rcvd);
10235 ESTAT_ADD(rx_xoff_entered);
10236 ESTAT_ADD(rx_frame_too_long_errors);
10237 ESTAT_ADD(rx_jabbers);
10238 ESTAT_ADD(rx_undersize_packets);
10239 ESTAT_ADD(rx_in_length_errors);
10240 ESTAT_ADD(rx_out_length_errors);
10241 ESTAT_ADD(rx_64_or_less_octet_packets);
10242 ESTAT_ADD(rx_65_to_127_octet_packets);
10243 ESTAT_ADD(rx_128_to_255_octet_packets);
10244 ESTAT_ADD(rx_256_to_511_octet_packets);
10245 ESTAT_ADD(rx_512_to_1023_octet_packets);
10246 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10247 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10248 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10249 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10250 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10252 ESTAT_ADD(tx_octets);
10253 ESTAT_ADD(tx_collisions);
10254 ESTAT_ADD(tx_xon_sent);
10255 ESTAT_ADD(tx_xoff_sent);
10256 ESTAT_ADD(tx_flow_control);
10257 ESTAT_ADD(tx_mac_errors);
10258 ESTAT_ADD(tx_single_collisions);
10259 ESTAT_ADD(tx_mult_collisions);
10260 ESTAT_ADD(tx_deferred);
10261 ESTAT_ADD(tx_excessive_collisions);
10262 ESTAT_ADD(tx_late_collisions);
10263 ESTAT_ADD(tx_collide_2times);
10264 ESTAT_ADD(tx_collide_3times);
10265 ESTAT_ADD(tx_collide_4times);
10266 ESTAT_ADD(tx_collide_5times);
10267 ESTAT_ADD(tx_collide_6times);
10268 ESTAT_ADD(tx_collide_7times);
10269 ESTAT_ADD(tx_collide_8times);
10270 ESTAT_ADD(tx_collide_9times);
10271 ESTAT_ADD(tx_collide_10times);
10272 ESTAT_ADD(tx_collide_11times);
10273 ESTAT_ADD(tx_collide_12times);
10274 ESTAT_ADD(tx_collide_13times);
10275 ESTAT_ADD(tx_collide_14times);
10276 ESTAT_ADD(tx_collide_15times);
10277 ESTAT_ADD(tx_ucast_packets);
10278 ESTAT_ADD(tx_mcast_packets);
10279 ESTAT_ADD(tx_bcast_packets);
10280 ESTAT_ADD(tx_carrier_sense_errors);
10281 ESTAT_ADD(tx_discards);
10282 ESTAT_ADD(tx_errors);
10284 ESTAT_ADD(dma_writeq_full);
10285 ESTAT_ADD(dma_write_prioq_full);
10286 ESTAT_ADD(rxbds_empty);
10287 ESTAT_ADD(rx_discards);
10288 ESTAT_ADD(rx_errors);
10289 ESTAT_ADD(rx_threshold_hit);
10291 ESTAT_ADD(dma_readq_full);
10292 ESTAT_ADD(dma_read_prioq_full);
10293 ESTAT_ADD(tx_comp_queue_full);
10295 ESTAT_ADD(ring_set_send_prod_index);
10296 ESTAT_ADD(ring_status_update);
10297 ESTAT_ADD(nic_irqs);
10298 ESTAT_ADD(nic_avoided_irqs);
10299 ESTAT_ADD(nic_tx_threshold_hit);
10301 ESTAT_ADD(mbuf_lwm_thresh_hit);
10304 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10306 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10307 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10309 stats->rx_packets = old_stats->rx_packets +
10310 get_stat64(&hw_stats->rx_ucast_packets) +
10311 get_stat64(&hw_stats->rx_mcast_packets) +
10312 get_stat64(&hw_stats->rx_bcast_packets);
10314 stats->tx_packets = old_stats->tx_packets +
10315 get_stat64(&hw_stats->tx_ucast_packets) +
10316 get_stat64(&hw_stats->tx_mcast_packets) +
10317 get_stat64(&hw_stats->tx_bcast_packets);
10319 stats->rx_bytes = old_stats->rx_bytes +
10320 get_stat64(&hw_stats->rx_octets);
10321 stats->tx_bytes = old_stats->tx_bytes +
10322 get_stat64(&hw_stats->tx_octets);
10324 stats->rx_errors = old_stats->rx_errors +
10325 get_stat64(&hw_stats->rx_errors);
10326 stats->tx_errors = old_stats->tx_errors +
10327 get_stat64(&hw_stats->tx_errors) +
10328 get_stat64(&hw_stats->tx_mac_errors) +
10329 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10330 get_stat64(&hw_stats->tx_discards);
10332 stats->multicast = old_stats->multicast +
10333 get_stat64(&hw_stats->rx_mcast_packets);
10334 stats->collisions = old_stats->collisions +
10335 get_stat64(&hw_stats->tx_collisions);
10337 stats->rx_length_errors = old_stats->rx_length_errors +
10338 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10339 get_stat64(&hw_stats->rx_undersize_packets);
10341 stats->rx_over_errors = old_stats->rx_over_errors +
10342 get_stat64(&hw_stats->rxbds_empty);
10343 stats->rx_frame_errors = old_stats->rx_frame_errors +
10344 get_stat64(&hw_stats->rx_align_errors);
10345 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10346 get_stat64(&hw_stats->tx_discards);
10347 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10348 get_stat64(&hw_stats->tx_carrier_sense_errors);
10350 stats->rx_crc_errors = old_stats->rx_crc_errors +
10351 tg3_calc_crc_errors(tp);
10353 stats->rx_missed_errors = old_stats->rx_missed_errors +
10354 get_stat64(&hw_stats->rx_discards);
10356 stats->rx_dropped = tp->rx_dropped;
10357 stats->tx_dropped = tp->tx_dropped;
10360 static int tg3_get_regs_len(struct net_device *dev)
10362 return TG3_REG_BLK_SIZE;
10365 static void tg3_get_regs(struct net_device *dev,
10366 struct ethtool_regs *regs, void *_p)
10368 struct tg3 *tp = netdev_priv(dev);
10372 memset(_p, 0, TG3_REG_BLK_SIZE);
10374 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10377 tg3_full_lock(tp, 0);
10379 tg3_dump_legacy_regs(tp, (u32 *)_p);
10381 tg3_full_unlock(tp);
10384 static int tg3_get_eeprom_len(struct net_device *dev)
10386 struct tg3 *tp = netdev_priv(dev);
10388 return tp->nvram_size;
10391 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10393 struct tg3 *tp = netdev_priv(dev);
10396 u32 i, offset, len, b_offset, b_count;
10399 if (tg3_flag(tp, NO_NVRAM))
10402 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10405 offset = eeprom->offset;
10409 eeprom->magic = TG3_EEPROM_MAGIC;
10412 /* adjustments to start on required 4 byte boundary */
10413 b_offset = offset & 3;
10414 b_count = 4 - b_offset;
10415 if (b_count > len) {
10416 /* i.e. offset=1 len=2 */
10419 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10422 memcpy(data, ((char *)&val) + b_offset, b_count);
10425 eeprom->len += b_count;
10428 /* read bytes up to the last 4 byte boundary */
10429 pd = &data[eeprom->len];
10430 for (i = 0; i < (len - (len & 3)); i += 4) {
10431 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10436 memcpy(pd + i, &val, 4);
10441 /* read last bytes not ending on 4 byte boundary */
10442 pd = &data[eeprom->len];
10444 b_offset = offset + len - b_count;
10445 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10448 memcpy(pd, &val, b_count);
10449 eeprom->len += b_count;
10454 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10456 struct tg3 *tp = netdev_priv(dev);
10458 u32 offset, len, b_offset, odd_len;
10462 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10465 if (tg3_flag(tp, NO_NVRAM) ||
10466 eeprom->magic != TG3_EEPROM_MAGIC)
10469 offset = eeprom->offset;
10472 if ((b_offset = (offset & 3))) {
10473 /* adjustments to start on required 4 byte boundary */
10474 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10485 /* adjustments to end on required 4 byte boundary */
10487 len = (len + 3) & ~3;
10488 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10494 if (b_offset || odd_len) {
10495 buf = kmalloc(len, GFP_KERNEL);
10499 memcpy(buf, &start, 4);
10501 memcpy(buf+len-4, &end, 4);
10502 memcpy(buf + b_offset, data, eeprom->len);
10505 ret = tg3_nvram_write_block(tp, offset, len, buf);
10513 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10515 struct tg3 *tp = netdev_priv(dev);
10517 if (tg3_flag(tp, USE_PHYLIB)) {
10518 struct phy_device *phydev;
10519 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10521 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10522 return phy_ethtool_gset(phydev, cmd);
10525 cmd->supported = (SUPPORTED_Autoneg);
10527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10528 cmd->supported |= (SUPPORTED_1000baseT_Half |
10529 SUPPORTED_1000baseT_Full);
10531 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10532 cmd->supported |= (SUPPORTED_100baseT_Half |
10533 SUPPORTED_100baseT_Full |
10534 SUPPORTED_10baseT_Half |
10535 SUPPORTED_10baseT_Full |
10537 cmd->port = PORT_TP;
10539 cmd->supported |= SUPPORTED_FIBRE;
10540 cmd->port = PORT_FIBRE;
10543 cmd->advertising = tp->link_config.advertising;
10544 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10545 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10546 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10547 cmd->advertising |= ADVERTISED_Pause;
10549 cmd->advertising |= ADVERTISED_Pause |
10550 ADVERTISED_Asym_Pause;
10552 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10553 cmd->advertising |= ADVERTISED_Asym_Pause;
10556 if (netif_running(dev) && netif_carrier_ok(dev)) {
10557 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10558 cmd->duplex = tp->link_config.active_duplex;
10559 cmd->lp_advertising = tp->link_config.rmt_adv;
10560 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10561 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10562 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10564 cmd->eth_tp_mdix = ETH_TP_MDI;
10567 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10568 cmd->duplex = DUPLEX_UNKNOWN;
10569 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10571 cmd->phy_address = tp->phy_addr;
10572 cmd->transceiver = XCVR_INTERNAL;
10573 cmd->autoneg = tp->link_config.autoneg;
10579 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10581 struct tg3 *tp = netdev_priv(dev);
10582 u32 speed = ethtool_cmd_speed(cmd);
10584 if (tg3_flag(tp, USE_PHYLIB)) {
10585 struct phy_device *phydev;
10586 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10588 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10589 return phy_ethtool_sset(phydev, cmd);
10592 if (cmd->autoneg != AUTONEG_ENABLE &&
10593 cmd->autoneg != AUTONEG_DISABLE)
10596 if (cmd->autoneg == AUTONEG_DISABLE &&
10597 cmd->duplex != DUPLEX_FULL &&
10598 cmd->duplex != DUPLEX_HALF)
10601 if (cmd->autoneg == AUTONEG_ENABLE) {
10602 u32 mask = ADVERTISED_Autoneg |
10604 ADVERTISED_Asym_Pause;
10606 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10607 mask |= ADVERTISED_1000baseT_Half |
10608 ADVERTISED_1000baseT_Full;
10610 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10611 mask |= ADVERTISED_100baseT_Half |
10612 ADVERTISED_100baseT_Full |
10613 ADVERTISED_10baseT_Half |
10614 ADVERTISED_10baseT_Full |
10617 mask |= ADVERTISED_FIBRE;
10619 if (cmd->advertising & ~mask)
10622 mask &= (ADVERTISED_1000baseT_Half |
10623 ADVERTISED_1000baseT_Full |
10624 ADVERTISED_100baseT_Half |
10625 ADVERTISED_100baseT_Full |
10626 ADVERTISED_10baseT_Half |
10627 ADVERTISED_10baseT_Full);
10629 cmd->advertising &= mask;
10631 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10632 if (speed != SPEED_1000)
10635 if (cmd->duplex != DUPLEX_FULL)
10638 if (speed != SPEED_100 &&
10644 tg3_full_lock(tp, 0);
10646 tp->link_config.autoneg = cmd->autoneg;
10647 if (cmd->autoneg == AUTONEG_ENABLE) {
10648 tp->link_config.advertising = (cmd->advertising |
10649 ADVERTISED_Autoneg);
10650 tp->link_config.speed = SPEED_UNKNOWN;
10651 tp->link_config.duplex = DUPLEX_UNKNOWN;
10653 tp->link_config.advertising = 0;
10654 tp->link_config.speed = speed;
10655 tp->link_config.duplex = cmd->duplex;
10658 if (netif_running(dev))
10659 tg3_setup_phy(tp, 1);
10661 tg3_full_unlock(tp);
10666 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10668 struct tg3 *tp = netdev_priv(dev);
10670 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10671 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10672 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10673 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10676 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10678 struct tg3 *tp = netdev_priv(dev);
10680 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10681 wol->supported = WAKE_MAGIC;
10683 wol->supported = 0;
10685 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10686 wol->wolopts = WAKE_MAGIC;
10687 memset(&wol->sopass, 0, sizeof(wol->sopass));
10690 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10692 struct tg3 *tp = netdev_priv(dev);
10693 struct device *dp = &tp->pdev->dev;
10695 if (wol->wolopts & ~WAKE_MAGIC)
10697 if ((wol->wolopts & WAKE_MAGIC) &&
10698 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10701 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10703 spin_lock_bh(&tp->lock);
10704 if (device_may_wakeup(dp))
10705 tg3_flag_set(tp, WOL_ENABLE);
10707 tg3_flag_clear(tp, WOL_ENABLE);
10708 spin_unlock_bh(&tp->lock);
10713 static u32 tg3_get_msglevel(struct net_device *dev)
10715 struct tg3 *tp = netdev_priv(dev);
10716 return tp->msg_enable;
10719 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10721 struct tg3 *tp = netdev_priv(dev);
10722 tp->msg_enable = value;
10725 static int tg3_nway_reset(struct net_device *dev)
10727 struct tg3 *tp = netdev_priv(dev);
10730 if (!netif_running(dev))
10733 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10736 if (tg3_flag(tp, USE_PHYLIB)) {
10737 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10739 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10743 spin_lock_bh(&tp->lock);
10745 tg3_readphy(tp, MII_BMCR, &bmcr);
10746 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10747 ((bmcr & BMCR_ANENABLE) ||
10748 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10749 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10753 spin_unlock_bh(&tp->lock);
10759 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10761 struct tg3 *tp = netdev_priv(dev);
10763 ering->rx_max_pending = tp->rx_std_ring_mask;
10764 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10765 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10767 ering->rx_jumbo_max_pending = 0;
10769 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10771 ering->rx_pending = tp->rx_pending;
10772 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10773 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10775 ering->rx_jumbo_pending = 0;
10777 ering->tx_pending = tp->napi[0].tx_pending;
10780 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10782 struct tg3 *tp = netdev_priv(dev);
10783 int i, irq_sync = 0, err = 0;
10785 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10786 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10787 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10788 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10789 (tg3_flag(tp, TSO_BUG) &&
10790 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10793 if (netif_running(dev)) {
10795 tg3_netif_stop(tp);
10799 tg3_full_lock(tp, irq_sync);
10801 tp->rx_pending = ering->rx_pending;
10803 if (tg3_flag(tp, MAX_RXPEND_64) &&
10804 tp->rx_pending > 63)
10805 tp->rx_pending = 63;
10806 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10808 for (i = 0; i < tp->irq_max; i++)
10809 tp->napi[i].tx_pending = ering->tx_pending;
10811 if (netif_running(dev)) {
10812 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10813 err = tg3_restart_hw(tp, 1);
10815 tg3_netif_start(tp);
10818 tg3_full_unlock(tp);
10820 if (irq_sync && !err)
10826 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10828 struct tg3 *tp = netdev_priv(dev);
10830 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10832 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10833 epause->rx_pause = 1;
10835 epause->rx_pause = 0;
10837 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10838 epause->tx_pause = 1;
10840 epause->tx_pause = 0;
10843 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10845 struct tg3 *tp = netdev_priv(dev);
10848 if (tg3_flag(tp, USE_PHYLIB)) {
10850 struct phy_device *phydev;
10852 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10854 if (!(phydev->supported & SUPPORTED_Pause) ||
10855 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10856 (epause->rx_pause != epause->tx_pause)))
10859 tp->link_config.flowctrl = 0;
10860 if (epause->rx_pause) {
10861 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10863 if (epause->tx_pause) {
10864 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10865 newadv = ADVERTISED_Pause;
10867 newadv = ADVERTISED_Pause |
10868 ADVERTISED_Asym_Pause;
10869 } else if (epause->tx_pause) {
10870 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10871 newadv = ADVERTISED_Asym_Pause;
10875 if (epause->autoneg)
10876 tg3_flag_set(tp, PAUSE_AUTONEG);
10878 tg3_flag_clear(tp, PAUSE_AUTONEG);
10880 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10881 u32 oldadv = phydev->advertising &
10882 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10883 if (oldadv != newadv) {
10884 phydev->advertising &=
10885 ~(ADVERTISED_Pause |
10886 ADVERTISED_Asym_Pause);
10887 phydev->advertising |= newadv;
10888 if (phydev->autoneg) {
10890 * Always renegotiate the link to
10891 * inform our link partner of our
10892 * flow control settings, even if the
10893 * flow control is forced. Let
10894 * tg3_adjust_link() do the final
10895 * flow control setup.
10897 return phy_start_aneg(phydev);
10901 if (!epause->autoneg)
10902 tg3_setup_flow_control(tp, 0, 0);
10904 tp->link_config.advertising &=
10905 ~(ADVERTISED_Pause |
10906 ADVERTISED_Asym_Pause);
10907 tp->link_config.advertising |= newadv;
10912 if (netif_running(dev)) {
10913 tg3_netif_stop(tp);
10917 tg3_full_lock(tp, irq_sync);
10919 if (epause->autoneg)
10920 tg3_flag_set(tp, PAUSE_AUTONEG);
10922 tg3_flag_clear(tp, PAUSE_AUTONEG);
10923 if (epause->rx_pause)
10924 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10926 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10927 if (epause->tx_pause)
10928 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10930 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10932 if (netif_running(dev)) {
10933 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10934 err = tg3_restart_hw(tp, 1);
10936 tg3_netif_start(tp);
10939 tg3_full_unlock(tp);
10945 static int tg3_get_sset_count(struct net_device *dev, int sset)
10949 return TG3_NUM_TEST;
10951 return TG3_NUM_STATS;
10953 return -EOPNOTSUPP;
10957 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10958 u32 *rules __always_unused)
10960 struct tg3 *tp = netdev_priv(dev);
10962 if (!tg3_flag(tp, SUPPORT_MSIX))
10963 return -EOPNOTSUPP;
10965 switch (info->cmd) {
10966 case ETHTOOL_GRXRINGS:
10967 if (netif_running(tp->dev))
10968 info->data = tp->irq_cnt;
10970 info->data = num_online_cpus();
10971 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10972 info->data = TG3_IRQ_MAX_VECS_RSS;
10975 /* The first interrupt vector only
10976 * handles link interrupts.
10982 return -EOPNOTSUPP;
10986 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10989 struct tg3 *tp = netdev_priv(dev);
10991 if (tg3_flag(tp, SUPPORT_MSIX))
10992 size = TG3_RSS_INDIR_TBL_SIZE;
10997 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10999 struct tg3 *tp = netdev_priv(dev);
11002 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11003 indir[i] = tp->rss_ind_tbl[i];
11008 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11010 struct tg3 *tp = netdev_priv(dev);
11013 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11014 tp->rss_ind_tbl[i] = indir[i];
11016 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11019 /* It is legal to write the indirection
11020 * table while the device is running.
11022 tg3_full_lock(tp, 0);
11023 tg3_rss_write_indir_tbl(tp);
11024 tg3_full_unlock(tp);
11029 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11031 switch (stringset) {
11033 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11036 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11039 WARN_ON(1); /* we need a WARN() */
11044 static int tg3_set_phys_id(struct net_device *dev,
11045 enum ethtool_phys_id_state state)
11047 struct tg3 *tp = netdev_priv(dev);
11049 if (!netif_running(tp->dev))
11053 case ETHTOOL_ID_ACTIVE:
11054 return 1; /* cycle on/off once per second */
11056 case ETHTOOL_ID_ON:
11057 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11058 LED_CTRL_1000MBPS_ON |
11059 LED_CTRL_100MBPS_ON |
11060 LED_CTRL_10MBPS_ON |
11061 LED_CTRL_TRAFFIC_OVERRIDE |
11062 LED_CTRL_TRAFFIC_BLINK |
11063 LED_CTRL_TRAFFIC_LED);
11066 case ETHTOOL_ID_OFF:
11067 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11068 LED_CTRL_TRAFFIC_OVERRIDE);
11071 case ETHTOOL_ID_INACTIVE:
11072 tw32(MAC_LED_CTRL, tp->led_ctrl);
11079 static void tg3_get_ethtool_stats(struct net_device *dev,
11080 struct ethtool_stats *estats, u64 *tmp_stats)
11082 struct tg3 *tp = netdev_priv(dev);
11085 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11087 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11090 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11094 u32 offset = 0, len = 0;
11097 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11100 if (magic == TG3_EEPROM_MAGIC) {
11101 for (offset = TG3_NVM_DIR_START;
11102 offset < TG3_NVM_DIR_END;
11103 offset += TG3_NVM_DIRENT_SIZE) {
11104 if (tg3_nvram_read(tp, offset, &val))
11107 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11108 TG3_NVM_DIRTYPE_EXTVPD)
11112 if (offset != TG3_NVM_DIR_END) {
11113 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11114 if (tg3_nvram_read(tp, offset + 4, &offset))
11117 offset = tg3_nvram_logical_addr(tp, offset);
11121 if (!offset || !len) {
11122 offset = TG3_NVM_VPD_OFF;
11123 len = TG3_NVM_VPD_LEN;
11126 buf = kmalloc(len, GFP_KERNEL);
11130 if (magic == TG3_EEPROM_MAGIC) {
11131 for (i = 0; i < len; i += 4) {
11132 /* The data is in little-endian format in NVRAM.
11133 * Use the big-endian read routines to preserve
11134 * the byte order as it exists in NVRAM.
11136 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11142 unsigned int pos = 0;
11144 ptr = (u8 *)&buf[0];
11145 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11146 cnt = pci_read_vpd(tp->pdev, pos,
11148 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11166 #define NVRAM_TEST_SIZE 0x100
11167 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11168 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11169 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11170 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11171 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11172 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11173 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11174 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11176 static int tg3_test_nvram(struct tg3 *tp)
11178 u32 csum, magic, len;
11180 int i, j, k, err = 0, size;
11182 if (tg3_flag(tp, NO_NVRAM))
11185 if (tg3_nvram_read(tp, 0, &magic) != 0)
11188 if (magic == TG3_EEPROM_MAGIC)
11189 size = NVRAM_TEST_SIZE;
11190 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11191 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11192 TG3_EEPROM_SB_FORMAT_1) {
11193 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11194 case TG3_EEPROM_SB_REVISION_0:
11195 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11197 case TG3_EEPROM_SB_REVISION_2:
11198 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11200 case TG3_EEPROM_SB_REVISION_3:
11201 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11203 case TG3_EEPROM_SB_REVISION_4:
11204 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11206 case TG3_EEPROM_SB_REVISION_5:
11207 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11209 case TG3_EEPROM_SB_REVISION_6:
11210 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11217 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11218 size = NVRAM_SELFBOOT_HW_SIZE;
11222 buf = kmalloc(size, GFP_KERNEL);
11227 for (i = 0, j = 0; i < size; i += 4, j++) {
11228 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11235 /* Selfboot format */
11236 magic = be32_to_cpu(buf[0]);
11237 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11238 TG3_EEPROM_MAGIC_FW) {
11239 u8 *buf8 = (u8 *) buf, csum8 = 0;
11241 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11242 TG3_EEPROM_SB_REVISION_2) {
11243 /* For rev 2, the csum doesn't include the MBA. */
11244 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11246 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11249 for (i = 0; i < size; i++)
11262 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11263 TG3_EEPROM_MAGIC_HW) {
11264 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11265 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11266 u8 *buf8 = (u8 *) buf;
11268 /* Separate the parity bits and the data bytes. */
11269 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11270 if ((i == 0) || (i == 8)) {
11274 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11275 parity[k++] = buf8[i] & msk;
11277 } else if (i == 16) {
11281 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11282 parity[k++] = buf8[i] & msk;
11285 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11286 parity[k++] = buf8[i] & msk;
11289 data[j++] = buf8[i];
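/* The selfboot-HW image stores parity bits alongside the data. After the
 * separation above, each data byte together with its parity bit must have
 * odd parity: odd-weight data expects the bit clear, even-weight data
 * expects it set.
 */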
11293 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11294 u8 hw8 = hweight8(data[i]);
11296 if ((hw8 & 0x1) && parity[i])
11298 else if (!(hw8 & 0x1) && !parity[i])
11307 /* Bootstrap checksum at offset 0x10 */
11308 csum = calc_crc((unsigned char *) buf, 0x10);
11309 if (csum != le32_to_cpu(buf[0x10/4]))
11312 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11313 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11314 if (csum != le32_to_cpu(buf[0xfc/4]))
11319 buf = tg3_vpd_readblock(tp, &len);
11323 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11325 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11329 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11332 i += PCI_VPD_LRDT_TAG_SIZE;
11333 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11334 PCI_VPD_RO_KEYWORD_CHKSUM);
11338 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11340 for (i = 0; i <= j; i++)
11341 csum8 += ((u8 *)buf)[i];
11355 #define TG3_SERDES_TIMEOUT_SEC 2
11356 #define TG3_COPPER_TIMEOUT_SEC 6
11358 static int tg3_test_link(struct tg3 *tp)
11362 if (!netif_running(tp->dev))
11365 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11366 max = TG3_SERDES_TIMEOUT_SEC;
11368 max = TG3_COPPER_TIMEOUT_SEC;
11370 for (i = 0; i < max; i++) {
11371 if (netif_carrier_ok(tp->dev))
11374 if (msleep_interruptible(1000))
11381 /* Only test the commonly used registers */
11382 static int tg3_test_registers(struct tg3 *tp)
11384 int i, is_5705, is_5750;
11385 u32 offset, read_mask, write_mask, val, save_val, read_val;
11386 static struct {
11387 u16 offset;
11388 u16 flags;
11389 #define TG3_FL_5705 0x1
11390 #define TG3_FL_NOT_5705 0x2
11391 #define TG3_FL_NOT_5788 0x4
11392 #define TG3_FL_NOT_5750 0x8
11393 u32 read_mask;
11394 u32 write_mask;
11395 } reg_tbl[] = {
11396 /* MAC Control Registers */
11397 { MAC_MODE, TG3_FL_NOT_5705,
11398 0x00000000, 0x00ef6f8c },
11399 { MAC_MODE, TG3_FL_5705,
11400 0x00000000, 0x01ef6b8c },
11401 { MAC_STATUS, TG3_FL_NOT_5705,
11402 0x03800107, 0x00000000 },
11403 { MAC_STATUS, TG3_FL_5705,
11404 0x03800100, 0x00000000 },
11405 { MAC_ADDR_0_HIGH, 0x0000,
11406 0x00000000, 0x0000ffff },
11407 { MAC_ADDR_0_LOW, 0x0000,
11408 0x00000000, 0xffffffff },
11409 { MAC_RX_MTU_SIZE, 0x0000,
11410 0x00000000, 0x0000ffff },
11411 { MAC_TX_MODE, 0x0000,
11412 0x00000000, 0x00000070 },
11413 { MAC_TX_LENGTHS, 0x0000,
11414 0x00000000, 0x00003fff },
11415 { MAC_RX_MODE, TG3_FL_NOT_5705,
11416 0x00000000, 0x000007fc },
11417 { MAC_RX_MODE, TG3_FL_5705,
11418 0x00000000, 0x000007dc },
11419 { MAC_HASH_REG_0, 0x0000,
11420 0x00000000, 0xffffffff },
11421 { MAC_HASH_REG_1, 0x0000,
11422 0x00000000, 0xffffffff },
11423 { MAC_HASH_REG_2, 0x0000,
11424 0x00000000, 0xffffffff },
11425 { MAC_HASH_REG_3, 0x0000,
11426 0x00000000, 0xffffffff },
11428 /* Receive Data and Receive BD Initiator Control Registers. */
11429 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11430 0x00000000, 0xffffffff },
11431 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11432 0x00000000, 0xffffffff },
11433 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11434 0x00000000, 0x00000003 },
11435 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11436 0x00000000, 0xffffffff },
11437 { RCVDBDI_STD_BD+0, 0x0000,
11438 0x00000000, 0xffffffff },
11439 { RCVDBDI_STD_BD+4, 0x0000,
11440 0x00000000, 0xffffffff },
11441 { RCVDBDI_STD_BD+8, 0x0000,
11442 0x00000000, 0xffff0002 },
11443 { RCVDBDI_STD_BD+0xc, 0x0000,
11444 0x00000000, 0xffffffff },
11446 /* Receive BD Initiator Control Registers. */
11447 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11448 0x00000000, 0xffffffff },
11449 { RCVBDI_STD_THRESH, TG3_FL_5705,
11450 0x00000000, 0x000003ff },
11451 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11452 0x00000000, 0xffffffff },
11454 /* Host Coalescing Control Registers. */
11455 { HOSTCC_MODE, TG3_FL_NOT_5705,
11456 0x00000000, 0x00000004 },
11457 { HOSTCC_MODE, TG3_FL_5705,
11458 0x00000000, 0x000000f6 },
11459 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11460 0x00000000, 0xffffffff },
11461 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11462 0x00000000, 0x000003ff },
11463 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11464 0x00000000, 0xffffffff },
11465 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11466 0x00000000, 0x000003ff },
11467 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11468 0x00000000, 0xffffffff },
11469 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11470 0x00000000, 0x000000ff },
11471 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11472 0x00000000, 0xffffffff },
11473 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11474 0x00000000, 0x000000ff },
11475 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11476 0x00000000, 0xffffffff },
11477 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11478 0x00000000, 0xffffffff },
11479 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11480 0x00000000, 0xffffffff },
11481 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11482 0x00000000, 0x000000ff },
11483 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11484 0x00000000, 0xffffffff },
11485 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11486 0x00000000, 0x000000ff },
11487 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11488 0x00000000, 0xffffffff },
11489 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11490 0x00000000, 0xffffffff },
11491 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11492 0x00000000, 0xffffffff },
11493 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11494 0x00000000, 0xffffffff },
11495 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11496 0x00000000, 0xffffffff },
11497 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11498 0xffffffff, 0x00000000 },
11499 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11500 0xffffffff, 0x00000000 },
11502 /* Buffer Manager Control Registers. */
11503 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11504 0x00000000, 0x007fff80 },
11505 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11506 0x00000000, 0x007fffff },
11507 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11508 0x00000000, 0x0000003f },
11509 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11510 0x00000000, 0x000001ff },
11511 { BUFMGR_MB_HIGH_WATER, 0x0000,
11512 0x00000000, 0x000001ff },
11513 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11514 0xffffffff, 0x00000000 },
11515 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11516 0xffffffff, 0x00000000 },
11518 /* Mailbox Registers */
11519 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11520 0x00000000, 0x000001ff },
11521 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11522 0x00000000, 0x000001ff },
11523 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11524 0x00000000, 0x000007ff },
11525 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11526 0x00000000, 0x000001ff },
11528 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11531 is_5705 = is_5750 = 0;
11532 if (tg3_flag(tp, 5705_PLUS)) {
11534 if (tg3_flag(tp, 5750_PLUS))
11538 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11539 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11542 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11545 if (tg3_flag(tp, IS_5788) &&
11546 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11549 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11552 offset = (u32) reg_tbl[i].offset;
11553 read_mask = reg_tbl[i].read_mask;
11554 write_mask = reg_tbl[i].write_mask;
11556 /* Save the original register content */
11557 save_val = tr32(offset);
11559 /* Determine the read-only value. */
11560 read_val = save_val & read_mask;
11562 /* Write zero to the register, then make sure the read-only bits
11563 * are not changed and the read/write bits are all zeros.
11564 */
11565 tw32(offset, 0);
11567 val = tr32(offset);
11569 /* Test the read-only and read/write bits. */
11570 if (((val & read_mask) != read_val) || (val & write_mask))
11573 /* Write ones to all the bits defined by RdMask and WrMask, then
11574 * make sure the read-only bits are not changed and the
11575 * read/write bits are all ones.
11577 tw32(offset, read_mask | write_mask);
11579 val = tr32(offset);
11581 /* Test the read-only bits. */
11582 if ((val & read_mask) != read_val)
11585 /* Test the read/write bits. */
11586 if ((val & write_mask) != write_mask)
11589 tw32(offset, save_val);
11595 if (netif_msg_hw(tp))
11596 netdev_err(tp->dev,
11597 "Register test failed at offset %x\n", offset);
11598 tw32(offset, save_val);
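/* Summary of the check above: each table entry is exercised by writing
 * all zeros and then (read_mask | write_mask) to the register.  After
 * either write, the bits covered by read_mask must still hold the value
 * saved before the test, and the bits covered by write_mask must reflect
 * exactly what was last written; any deviation fails the register test
 * and the original register contents are restored.
 */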
11602 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11604 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11608 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11609 for (j = 0; j < len; j += 4) {
11612 tg3_write_mem(tp, offset + j, test_pattern[i]);
11613 tg3_read_mem(tp, offset + j, &val);
11614 if (val != test_pattern[i])
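/* The memory test walks a window of NIC-internal memory one 32-bit word
 * at a time, writing each entry of test_pattern[] and reading it back;
 * the first mismatch aborts the test for that window.
 */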
11621 static int tg3_test_memory(struct tg3 *tp)
11623 static struct mem_entry {
11626 } mem_tbl_570x[] = {
11627 { 0x00000000, 0x00b50},
11628 { 0x00002000, 0x1c000},
11629 { 0xffffffff, 0x00000}
11630 }, mem_tbl_5705[] = {
11631 { 0x00000100, 0x0000c},
11632 { 0x00000200, 0x00008},
11633 { 0x00004000, 0x00800},
11634 { 0x00006000, 0x01000},
11635 { 0x00008000, 0x02000},
11636 { 0x00010000, 0x0e000},
11637 { 0xffffffff, 0x00000}
11638 }, mem_tbl_5755[] = {
11639 { 0x00000200, 0x00008},
11640 { 0x00004000, 0x00800},
11641 { 0x00006000, 0x00800},
11642 { 0x00008000, 0x02000},
11643 { 0x00010000, 0x0c000},
11644 { 0xffffffff, 0x00000}
11645 }, mem_tbl_5906[] = {
11646 { 0x00000200, 0x00008},
11647 { 0x00004000, 0x00400},
11648 { 0x00006000, 0x00400},
11649 { 0x00008000, 0x01000},
11650 { 0x00010000, 0x01000},
11651 { 0xffffffff, 0x00000}
11652 }, mem_tbl_5717[] = {
11653 { 0x00000200, 0x00008},
11654 { 0x00010000, 0x0a000},
11655 { 0x00020000, 0x13c00},
11656 { 0xffffffff, 0x00000}
11657 }, mem_tbl_57765[] = {
11658 { 0x00000200, 0x00008},
11659 { 0x00004000, 0x00800},
11660 { 0x00006000, 0x09800},
11661 { 0x00010000, 0x0a000},
11662 { 0xffffffff, 0x00000}
11664 struct mem_entry *mem_tbl;
11668 if (tg3_flag(tp, 5717_PLUS))
11669 mem_tbl = mem_tbl_5717;
11670 else if (tg3_flag(tp, 57765_CLASS))
11671 mem_tbl = mem_tbl_57765;
11672 else if (tg3_flag(tp, 5755_PLUS))
11673 mem_tbl = mem_tbl_5755;
11674 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11675 mem_tbl = mem_tbl_5906;
11676 else if (tg3_flag(tp, 5705_PLUS))
11677 mem_tbl = mem_tbl_5705;
11679 mem_tbl = mem_tbl_570x;
11681 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11682 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11690 #define TG3_TSO_MSS 500
11692 #define TG3_TSO_IP_HDR_LEN 20
11693 #define TG3_TSO_TCP_HDR_LEN 20
11694 #define TG3_TSO_TCP_OPT_LEN 12
11696 static const u8 tg3_tso_header[] = {
11698 0x45, 0x00, 0x00, 0x00,
11699 0x00, 0x00, 0x40, 0x00,
11700 0x40, 0x06, 0x00, 0x00,
11701 0x0a, 0x00, 0x00, 0x01,
11702 0x0a, 0x00, 0x00, 0x02,
11703 0x0d, 0x00, 0xe0, 0x00,
11704 0x00, 0x00, 0x01, 0x00,
11705 0x00, 0x00, 0x02, 0x00,
11706 0x80, 0x10, 0x10, 0x00,
11707 0x14, 0x09, 0x00, 0x00,
11708 0x01, 0x01, 0x08, 0x0a,
11709 0x11, 0x11, 0x11, 0x11,
11710 0x11, 0x11, 0x11, 0x11,
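/* The canned header above is a fixed IPv4/TCP header template (20-byte
 * IP header, 20-byte TCP header plus a 12-byte option block, matching
 * the TG3_TSO_*_LEN constants) with placeholder addresses
 * 10.0.0.1 -> 10.0.0.2.  The TSO loopback test copies it in right after
 * the two MAC addresses and expects the hardware to segment the
 * oversized payload on TG3_TSO_MSS-byte boundaries.
 */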
11713 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11715 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11716 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11718 struct sk_buff *skb;
11719 u8 *tx_data, *rx_data;
11721 int num_pkts, tx_len, rx_len, i, err;
11722 struct tg3_rx_buffer_desc *desc;
11723 struct tg3_napi *tnapi, *rnapi;
11724 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11726 tnapi = &tp->napi[0];
11727 rnapi = &tp->napi[0];
11728 if (tp->irq_cnt > 1) {
11729 if (tg3_flag(tp, ENABLE_RSS))
11730 rnapi = &tp->napi[1];
11731 if (tg3_flag(tp, ENABLE_TSS))
11732 tnapi = &tp->napi[1];
11734 coal_now = tnapi->coal_now | rnapi->coal_now;
11739 skb = netdev_alloc_skb(tp->dev, tx_len);
11743 tx_data = skb_put(skb, tx_len);
11744 memcpy(tx_data, tp->dev->dev_addr, 6);
11745 memset(tx_data + 6, 0x0, 8);
11747 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11749 if (tso_loopback) {
11750 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11752 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11753 TG3_TSO_TCP_OPT_LEN;
11755 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11756 sizeof(tg3_tso_header));
11759 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11760 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11762 /* Set the total length field in the IP header */
11763 iph->tot_len = htons((u16)(mss + hdr_len));
11765 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11766 TXD_FLAG_CPU_POST_DMA);
11768 if (tg3_flag(tp, HW_TSO_1) ||
11769 tg3_flag(tp, HW_TSO_2) ||
11770 tg3_flag(tp, HW_TSO_3)) {
11772 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11773 th = (struct tcphdr *)&tx_data[val];
11776 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11778 if (tg3_flag(tp, HW_TSO_3)) {
11779 mss |= (hdr_len & 0xc) << 12;
11780 if (hdr_len & 0x10)
11781 base_flags |= 0x00000010;
11782 base_flags |= (hdr_len & 0x3e0) << 5;
11783 } else if (tg3_flag(tp, HW_TSO_2))
11784 mss |= hdr_len << 9;
11785 else if (tg3_flag(tp, HW_TSO_1) ||
11786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11787 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11789 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11792 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11795 data_off = ETH_HLEN;
11797 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11798 tx_len > VLAN_ETH_FRAME_LEN)
11799 base_flags |= TXD_FLAG_JMB_PKT;
11802 for (i = data_off; i < tx_len; i++)
11803 tx_data[i] = (u8) (i & 0xff);
11805 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11806 if (pci_dma_mapping_error(tp->pdev, map)) {
11807 dev_kfree_skb(skb);
11811 val = tnapi->tx_prod;
11812 tnapi->tx_buffers[val].skb = skb;
11813 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11815 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11820 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11822 budget = tg3_tx_avail(tnapi);
11823 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11824 base_flags | TXD_FLAG_END, mss, 0)) {
11825 tnapi->tx_buffers[val].skb = NULL;
11826 dev_kfree_skb(skb);
11832 /* Sync BD data before updating mailbox */
11835 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11836 tr32_mailbox(tnapi->prodmbox);
11840 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11841 for (i = 0; i < 35; i++) {
11842 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11847 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11848 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11849 if ((tx_idx == tnapi->tx_prod) &&
11850 (rx_idx == (rx_start_idx + num_pkts)))
11854 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11855 dev_kfree_skb(skb);
11857 if (tx_idx != tnapi->tx_prod)
11860 if (rx_idx != rx_start_idx + num_pkts)
11864 while (rx_idx != rx_start_idx) {
11865 desc = &rnapi->rx_rcb[rx_start_idx++];
11866 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11867 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11869 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11870 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11873 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11876 if (!tso_loopback) {
11877 if (rx_len != tx_len)
11880 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11881 if (opaque_key != RXD_OPAQUE_RING_STD)
11884 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11887 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11888 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11889 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11893 if (opaque_key == RXD_OPAQUE_RING_STD) {
11894 rx_data = tpr->rx_std_buffers[desc_idx].data;
11895 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11897 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11898 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11899 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11904 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11905 PCI_DMA_FROMDEVICE);
11907 rx_data += TG3_RX_OFFSET(tp);
11908 for (i = data_off; i < rx_len; i++, val++) {
11909 if (*(rx_data + i) != (u8) (val & 0xff))
11916 /* tg3_free_rings will unmap and free the rx_data */
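/* End-to-end flow of the loopback test above: a frame whose payload is a
 * simple counting pattern is mapped and queued on the send ring, the
 * host coalescing block is kicked so the status block updates, and the
 * test then polls the tx consumer / rx producer indices.  Once the frame
 * comes back, the rx descriptor (ring, length, checksum flags) and every
 * payload byte are verified against what was sent.
 */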
11921 #define TG3_STD_LOOPBACK_FAILED 1
11922 #define TG3_JMB_LOOPBACK_FAILED 2
11923 #define TG3_TSO_LOOPBACK_FAILED 4
11924 #define TG3_LOOPBACK_FAILED \
11925 (TG3_STD_LOOPBACK_FAILED | \
11926 TG3_JMB_LOOPBACK_FAILED | \
11927 TG3_TSO_LOOPBACK_FAILED)
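/* As used by tg3_test_loopback() below, data[0] collects MAC-loopback
 * failures, data[1] internal PHY-loopback failures, and data[2]
 * external (cable) loopback failures; each word carries the STD/JMB/TSO
 * bits defined above.
 */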
11929 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11933 u32 jmb_pkt_sz = 9000;
11936 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11938 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11939 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11941 if (!netif_running(tp->dev)) {
11942 data[0] = TG3_LOOPBACK_FAILED;
11943 data[1] = TG3_LOOPBACK_FAILED;
11945 data[2] = TG3_LOOPBACK_FAILED;
11949 err = tg3_reset_hw(tp, 1);
11951 data[0] = TG3_LOOPBACK_FAILED;
11952 data[1] = TG3_LOOPBACK_FAILED;
11954 data[2] = TG3_LOOPBACK_FAILED;
11958 if (tg3_flag(tp, ENABLE_RSS)) {
11961 /* Reroute all rx packets to the 1st queue */
11962 for (i = MAC_RSS_INDIR_TBL_0;
11963 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11967 /* HW errata - mac loopback fails in some cases on 5780.
11968 * Normal traffic and PHY loopback are not affected by
11969 * errata. Also, the MAC loopback test is deprecated for
11970 * all newer ASIC revisions.
11972 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11973 !tg3_flag(tp, CPMU_PRESENT)) {
11974 tg3_mac_loopback(tp, true);
11976 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11977 data[0] |= TG3_STD_LOOPBACK_FAILED;
11979 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11980 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11981 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11983 tg3_mac_loopback(tp, false);
11986 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11987 !tg3_flag(tp, USE_PHYLIB)) {
11990 tg3_phy_lpbk_set(tp, 0, false);
11992 /* Wait for link */
11993 for (i = 0; i < 100; i++) {
11994 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11999 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12000 data[1] |= TG3_STD_LOOPBACK_FAILED;
12001 if (tg3_flag(tp, TSO_CAPABLE) &&
12002 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12003 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12004 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12005 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12006 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12009 tg3_phy_lpbk_set(tp, 0, true);
12011 /* All link indications report up, but the hardware
12012 * isn't really ready for about 20 msec. Double it
12017 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12018 data[2] |= TG3_STD_LOOPBACK_FAILED;
12019 if (tg3_flag(tp, TSO_CAPABLE) &&
12020 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12021 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12022 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12023 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12024 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12027 /* Re-enable gphy autopowerdown. */
12028 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12029 tg3_phy_toggle_apd(tp, true);
12032 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12035 tp->phy_flags |= eee_cap;
12040 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12043 struct tg3 *tp = netdev_priv(dev);
12044 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12046 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12047 tg3_power_up(tp)) {
12048 etest->flags |= ETH_TEST_FL_FAILED;
12049 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12053 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12055 if (tg3_test_nvram(tp) != 0) {
12056 etest->flags |= ETH_TEST_FL_FAILED;
12059 if (!doextlpbk && tg3_test_link(tp)) {
12060 etest->flags |= ETH_TEST_FL_FAILED;
12063 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12064 int err, err2 = 0, irq_sync = 0;
12066 if (netif_running(dev)) {
12068 tg3_netif_stop(tp);
12072 tg3_full_lock(tp, irq_sync);
12074 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12075 err = tg3_nvram_lock(tp);
12076 tg3_halt_cpu(tp, RX_CPU_BASE);
12077 if (!tg3_flag(tp, 5705_PLUS))
12078 tg3_halt_cpu(tp, TX_CPU_BASE);
12080 tg3_nvram_unlock(tp);
12082 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12085 if (tg3_test_registers(tp) != 0) {
12086 etest->flags |= ETH_TEST_FL_FAILED;
12090 if (tg3_test_memory(tp) != 0) {
12091 etest->flags |= ETH_TEST_FL_FAILED;
12096 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12098 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12099 etest->flags |= ETH_TEST_FL_FAILED;
12101 tg3_full_unlock(tp);
12103 if (tg3_test_interrupt(tp) != 0) {
12104 etest->flags |= ETH_TEST_FL_FAILED;
12108 tg3_full_lock(tp, 0);
12110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12111 if (netif_running(dev)) {
12112 tg3_flag_set(tp, INIT_COMPLETE);
12113 err2 = tg3_restart_hw(tp, 1);
12115 tg3_netif_start(tp);
12118 tg3_full_unlock(tp);
12120 if (irq_sync && !err2)
12123 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12124 tg3_power_down(tp);
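/* These tests are normally reached from userspace through the ethtool
 * self-test interface; with a reasonably recent ethtool something like
 *
 *	ethtool -t eth0 offline
 *
 * runs the full set (nvram, link, registers, memory, loopback,
 * interrupt), while "online" skips the offline-only tests.  "eth0" is
 * only an example interface name.
 */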
12128 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12130 struct mii_ioctl_data *data = if_mii(ifr);
12131 struct tg3 *tp = netdev_priv(dev);
12134 if (tg3_flag(tp, USE_PHYLIB)) {
12135 struct phy_device *phydev;
12136 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12138 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12139 return phy_mii_ioctl(phydev, ifr, cmd);
12144 data->phy_id = tp->phy_addr;
12147 case SIOCGMIIREG: {
12150 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12151 break; /* We have no PHY */
12153 if (!netif_running(dev))
12156 spin_lock_bh(&tp->lock);
12157 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12158 spin_unlock_bh(&tp->lock);
12160 data->val_out = mii_regval;
12166 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12167 break; /* We have no PHY */
12169 if (!netif_running(dev))
12172 spin_lock_bh(&tp->lock);
12173 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12174 spin_unlock_bh(&tp->lock);
12182 return -EOPNOTSUPP;
12185 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12187 struct tg3 *tp = netdev_priv(dev);
12189 memcpy(ec, &tp->coal, sizeof(*ec));
12193 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12195 struct tg3 *tp = netdev_priv(dev);
12196 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12197 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12199 if (!tg3_flag(tp, 5705_PLUS)) {
12200 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12201 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12202 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12203 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12206 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12207 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12208 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12209 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12210 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12211 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12212 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12213 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12214 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12215 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12218 /* No rx interrupts will be generated if both are zero */
12219 if ((ec->rx_coalesce_usecs == 0) &&
12220 (ec->rx_max_coalesced_frames == 0))
12223 /* No tx interrupts will be generated if both are zero */
12224 if ((ec->tx_coalesce_usecs == 0) &&
12225 (ec->tx_max_coalesced_frames == 0))
12228 /* Only copy relevant parameters, ignore all others. */
12229 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12230 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12231 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12232 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12233 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12234 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12235 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12236 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12237 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12239 if (netif_running(dev)) {
12240 tg3_full_lock(tp, 0);
12241 __tg3_set_coalesce(tp, &tp->coal);
12242 tg3_full_unlock(tp);
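/* Userspace tunes these values through the standard coalescing
 * interface, e.g. (illustrative; the interface name is an example):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * which maps to rx_coalesce_usecs, rx_max_coalesced_frames,
 * tx_coalesce_usecs and tx_max_coalesced_frames above.
 */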
12247 static const struct ethtool_ops tg3_ethtool_ops = {
12248 .get_settings = tg3_get_settings,
12249 .set_settings = tg3_set_settings,
12250 .get_drvinfo = tg3_get_drvinfo,
12251 .get_regs_len = tg3_get_regs_len,
12252 .get_regs = tg3_get_regs,
12253 .get_wol = tg3_get_wol,
12254 .set_wol = tg3_set_wol,
12255 .get_msglevel = tg3_get_msglevel,
12256 .set_msglevel = tg3_set_msglevel,
12257 .nway_reset = tg3_nway_reset,
12258 .get_link = ethtool_op_get_link,
12259 .get_eeprom_len = tg3_get_eeprom_len,
12260 .get_eeprom = tg3_get_eeprom,
12261 .set_eeprom = tg3_set_eeprom,
12262 .get_ringparam = tg3_get_ringparam,
12263 .set_ringparam = tg3_set_ringparam,
12264 .get_pauseparam = tg3_get_pauseparam,
12265 .set_pauseparam = tg3_set_pauseparam,
12266 .self_test = tg3_self_test,
12267 .get_strings = tg3_get_strings,
12268 .set_phys_id = tg3_set_phys_id,
12269 .get_ethtool_stats = tg3_get_ethtool_stats,
12270 .get_coalesce = tg3_get_coalesce,
12271 .set_coalesce = tg3_set_coalesce,
12272 .get_sset_count = tg3_get_sset_count,
12273 .get_rxnfc = tg3_get_rxnfc,
12274 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12275 .get_rxfh_indir = tg3_get_rxfh_indir,
12276 .set_rxfh_indir = tg3_set_rxfh_indir,
12277 .get_ts_info = ethtool_op_get_ts_info,
12280 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12281 struct rtnl_link_stats64 *stats)
12283 struct tg3 *tp = netdev_priv(dev);
12286 return &tp->net_stats_prev;
12288 spin_lock_bh(&tp->lock);
12289 tg3_get_nstats(tp, stats);
12290 spin_unlock_bh(&tp->lock);
12295 static void tg3_set_rx_mode(struct net_device *dev)
12297 struct tg3 *tp = netdev_priv(dev);
12299 if (!netif_running(dev))
12302 tg3_full_lock(tp, 0);
12303 __tg3_set_rx_mode(dev);
12304 tg3_full_unlock(tp);
12307 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12310 dev->mtu = new_mtu;
12312 if (new_mtu > ETH_DATA_LEN) {
12313 if (tg3_flag(tp, 5780_CLASS)) {
12314 netdev_update_features(dev);
12315 tg3_flag_clear(tp, TSO_CAPABLE);
12317 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12320 if (tg3_flag(tp, 5780_CLASS)) {
12321 tg3_flag_set(tp, TSO_CAPABLE);
12322 netdev_update_features(dev);
12324 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
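/* On 5780-class chips jumbo frames and TSO are mutually exclusive, so
 * raising the MTU above ETH_DATA_LEN clears TSO_CAPABLE (and
 * netdev_update_features() drops the TSO features) before the jumbo
 * ring is enabled; lowering the MTU back re-enables TSO.  Other chips
 * only toggle JUMBO_RING_ENABLE.
 */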
12328 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12330 struct tg3 *tp = netdev_priv(dev);
12331 int err, reset_phy = 0;
12333 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12336 if (!netif_running(dev)) {
12337 /* We'll just catch it later when the
12340 tg3_set_mtu(dev, tp, new_mtu);
12346 tg3_netif_stop(tp);
12348 tg3_full_lock(tp, 1);
12350 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12352 tg3_set_mtu(dev, tp, new_mtu);
12354 /* Reset the PHY, otherwise the read DMA engine will be left in a mode
12355 * that breaks all DMA read requests up into 256-byte chunks.
12357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12360 err = tg3_restart_hw(tp, reset_phy);
12363 tg3_netif_start(tp);
12365 tg3_full_unlock(tp);
12373 static const struct net_device_ops tg3_netdev_ops = {
12374 .ndo_open = tg3_open,
12375 .ndo_stop = tg3_close,
12376 .ndo_start_xmit = tg3_start_xmit,
12377 .ndo_get_stats64 = tg3_get_stats64,
12378 .ndo_validate_addr = eth_validate_addr,
12379 .ndo_set_rx_mode = tg3_set_rx_mode,
12380 .ndo_set_mac_address = tg3_set_mac_addr,
12381 .ndo_do_ioctl = tg3_ioctl,
12382 .ndo_tx_timeout = tg3_tx_timeout,
12383 .ndo_change_mtu = tg3_change_mtu,
12384 .ndo_fix_features = tg3_fix_features,
12385 .ndo_set_features = tg3_set_features,
12386 #ifdef CONFIG_NET_POLL_CONTROLLER
12387 .ndo_poll_controller = tg3_poll_controller,
12391 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12393 u32 cursize, val, magic;
12395 tp->nvram_size = EEPROM_CHIP_SIZE;
12397 if (tg3_nvram_read(tp, 0, &magic) != 0)
12400 if ((magic != TG3_EEPROM_MAGIC) &&
12401 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12402 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12406 * Size the chip by reading offsets at increasing powers of two.
12407 * When we encounter our validation signature, we know the addressing
12408 * has wrapped around, and thus have our chip size.
12412 while (cursize < tp->nvram_size) {
12413 if (tg3_nvram_read(tp, cursize, &val) != 0)
12422 tp->nvram_size = cursize;
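/* Minimal sketch of the wrap-around sizing idea used above (placeholder
 * names, not driver code): read_word() stands for tg3_nvram_read(),
 * magic0 for the signature read at offset 0, and limit for the default
 * EEPROM_CHIP_SIZE.  The probe offset doubles each pass; the offset at
 * which the signature reappears is the device size.
 *
 *	u32 size = 0x10, word;
 *
 *	while (size < limit) {
 *		if (read_word(size, &word))
 *			break;
 *		if (word == magic0)
 *			break;
 *		size <<= 1;
 *	}
 *	nvram_size = size;
 */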
12425 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12429 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12432 /* Selfboot format */
12433 if (val != TG3_EEPROM_MAGIC) {
12434 tg3_get_eeprom_size(tp);
12438 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12440 /* This is confusing. We want to operate on the
12441 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12442 * call will read from NVRAM and byteswap the data
12443 * according to the byteswapping settings for all
12444 * other register accesses. This ensures the data we
12445 * want will always reside in the lower 16-bits.
12446 * However, the data in NVRAM is in LE format, which
12447 * means the data from the NVRAM read will always be
12448 * opposite the endianness of the CPU. The 16-bit
12449 * byteswap then brings the data to CPU endianness.
12451 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12455 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
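/* Worked example for the swab16() conversion above: if the selfboot
 * image stores 0x0200 in the halfword at offset 0xf2, the 32-bit read
 * at 0xf0 returns that halfword byte-swapped relative to the CPU, so
 * swab16() recovers 0x0200 and the NVRAM size becomes
 * 0x0200 * 1024 = 512 KB.
 */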
12458 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12462 nvcfg1 = tr32(NVRAM_CFG1);
12463 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12464 tg3_flag_set(tp, FLASH);
12466 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12467 tw32(NVRAM_CFG1, nvcfg1);
12470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12471 tg3_flag(tp, 5780_CLASS)) {
12472 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12473 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12474 tp->nvram_jedecnum = JEDEC_ATMEL;
12475 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12476 tg3_flag_set(tp, NVRAM_BUFFERED);
12478 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12479 tp->nvram_jedecnum = JEDEC_ATMEL;
12480 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12482 case FLASH_VENDOR_ATMEL_EEPROM:
12483 tp->nvram_jedecnum = JEDEC_ATMEL;
12484 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12485 tg3_flag_set(tp, NVRAM_BUFFERED);
12487 case FLASH_VENDOR_ST:
12488 tp->nvram_jedecnum = JEDEC_ST;
12489 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12490 tg3_flag_set(tp, NVRAM_BUFFERED);
12492 case FLASH_VENDOR_SAIFUN:
12493 tp->nvram_jedecnum = JEDEC_SAIFUN;
12494 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12496 case FLASH_VENDOR_SST_SMALL:
12497 case FLASH_VENDOR_SST_LARGE:
12498 tp->nvram_jedecnum = JEDEC_SST;
12499 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12503 tp->nvram_jedecnum = JEDEC_ATMEL;
12504 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12505 tg3_flag_set(tp, NVRAM_BUFFERED);
12509 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12511 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12512 case FLASH_5752PAGE_SIZE_256:
12513 tp->nvram_pagesize = 256;
12515 case FLASH_5752PAGE_SIZE_512:
12516 tp->nvram_pagesize = 512;
12518 case FLASH_5752PAGE_SIZE_1K:
12519 tp->nvram_pagesize = 1024;
12521 case FLASH_5752PAGE_SIZE_2K:
12522 tp->nvram_pagesize = 2048;
12524 case FLASH_5752PAGE_SIZE_4K:
12525 tp->nvram_pagesize = 4096;
12527 case FLASH_5752PAGE_SIZE_264:
12528 tp->nvram_pagesize = 264;
12530 case FLASH_5752PAGE_SIZE_528:
12531 tp->nvram_pagesize = 528;
12536 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12540 nvcfg1 = tr32(NVRAM_CFG1);
12542 /* NVRAM protection for TPM */
12543 if (nvcfg1 & (1 << 27))
12544 tg3_flag_set(tp, PROTECTED_NVRAM);
12546 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12547 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12548 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12549 tp->nvram_jedecnum = JEDEC_ATMEL;
12550 tg3_flag_set(tp, NVRAM_BUFFERED);
12552 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12553 tp->nvram_jedecnum = JEDEC_ATMEL;
12554 tg3_flag_set(tp, NVRAM_BUFFERED);
12555 tg3_flag_set(tp, FLASH);
12557 case FLASH_5752VENDOR_ST_M45PE10:
12558 case FLASH_5752VENDOR_ST_M45PE20:
12559 case FLASH_5752VENDOR_ST_M45PE40:
12560 tp->nvram_jedecnum = JEDEC_ST;
12561 tg3_flag_set(tp, NVRAM_BUFFERED);
12562 tg3_flag_set(tp, FLASH);
12566 if (tg3_flag(tp, FLASH)) {
12567 tg3_nvram_get_pagesize(tp, nvcfg1);
12569 /* For eeprom, set pagesize to maximum eeprom size */
12570 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12572 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12573 tw32(NVRAM_CFG1, nvcfg1);
12577 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12579 u32 nvcfg1, protect = 0;
12581 nvcfg1 = tr32(NVRAM_CFG1);
12583 /* NVRAM protection for TPM */
12584 if (nvcfg1 & (1 << 27)) {
12585 tg3_flag_set(tp, PROTECTED_NVRAM);
12589 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12591 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12592 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12593 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12594 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12595 tp->nvram_jedecnum = JEDEC_ATMEL;
12596 tg3_flag_set(tp, NVRAM_BUFFERED);
12597 tg3_flag_set(tp, FLASH);
12598 tp->nvram_pagesize = 264;
12599 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12600 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12601 tp->nvram_size = (protect ? 0x3e200 :
12602 TG3_NVRAM_SIZE_512KB);
12603 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12604 tp->nvram_size = (protect ? 0x1f200 :
12605 TG3_NVRAM_SIZE_256KB);
12607 tp->nvram_size = (protect ? 0x1f200 :
12608 TG3_NVRAM_SIZE_128KB);
12610 case FLASH_5752VENDOR_ST_M45PE10:
12611 case FLASH_5752VENDOR_ST_M45PE20:
12612 case FLASH_5752VENDOR_ST_M45PE40:
12613 tp->nvram_jedecnum = JEDEC_ST;
12614 tg3_flag_set(tp, NVRAM_BUFFERED);
12615 tg3_flag_set(tp, FLASH);
12616 tp->nvram_pagesize = 256;
12617 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12618 tp->nvram_size = (protect ?
12619 TG3_NVRAM_SIZE_64KB :
12620 TG3_NVRAM_SIZE_128KB);
12621 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12622 tp->nvram_size = (protect ?
12623 TG3_NVRAM_SIZE_64KB :
12624 TG3_NVRAM_SIZE_256KB);
12626 tp->nvram_size = (protect ?
12627 TG3_NVRAM_SIZE_128KB :
12628 TG3_NVRAM_SIZE_512KB);
12633 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12637 nvcfg1 = tr32(NVRAM_CFG1);
12639 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12640 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12641 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12642 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12643 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12644 tp->nvram_jedecnum = JEDEC_ATMEL;
12645 tg3_flag_set(tp, NVRAM_BUFFERED);
12646 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12648 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12649 tw32(NVRAM_CFG1, nvcfg1);
12651 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12652 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12653 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12654 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12655 tp->nvram_jedecnum = JEDEC_ATMEL;
12656 tg3_flag_set(tp, NVRAM_BUFFERED);
12657 tg3_flag_set(tp, FLASH);
12658 tp->nvram_pagesize = 264;
12660 case FLASH_5752VENDOR_ST_M45PE10:
12661 case FLASH_5752VENDOR_ST_M45PE20:
12662 case FLASH_5752VENDOR_ST_M45PE40:
12663 tp->nvram_jedecnum = JEDEC_ST;
12664 tg3_flag_set(tp, NVRAM_BUFFERED);
12665 tg3_flag_set(tp, FLASH);
12666 tp->nvram_pagesize = 256;
12671 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12673 u32 nvcfg1, protect = 0;
12675 nvcfg1 = tr32(NVRAM_CFG1);
12677 /* NVRAM protection for TPM */
12678 if (nvcfg1 & (1 << 27)) {
12679 tg3_flag_set(tp, PROTECTED_NVRAM);
12683 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12685 case FLASH_5761VENDOR_ATMEL_ADB021D:
12686 case FLASH_5761VENDOR_ATMEL_ADB041D:
12687 case FLASH_5761VENDOR_ATMEL_ADB081D:
12688 case FLASH_5761VENDOR_ATMEL_ADB161D:
12689 case FLASH_5761VENDOR_ATMEL_MDB021D:
12690 case FLASH_5761VENDOR_ATMEL_MDB041D:
12691 case FLASH_5761VENDOR_ATMEL_MDB081D:
12692 case FLASH_5761VENDOR_ATMEL_MDB161D:
12693 tp->nvram_jedecnum = JEDEC_ATMEL;
12694 tg3_flag_set(tp, NVRAM_BUFFERED);
12695 tg3_flag_set(tp, FLASH);
12696 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12697 tp->nvram_pagesize = 256;
12699 case FLASH_5761VENDOR_ST_A_M45PE20:
12700 case FLASH_5761VENDOR_ST_A_M45PE40:
12701 case FLASH_5761VENDOR_ST_A_M45PE80:
12702 case FLASH_5761VENDOR_ST_A_M45PE16:
12703 case FLASH_5761VENDOR_ST_M_M45PE20:
12704 case FLASH_5761VENDOR_ST_M_M45PE40:
12705 case FLASH_5761VENDOR_ST_M_M45PE80:
12706 case FLASH_5761VENDOR_ST_M_M45PE16:
12707 tp->nvram_jedecnum = JEDEC_ST;
12708 tg3_flag_set(tp, NVRAM_BUFFERED);
12709 tg3_flag_set(tp, FLASH);
12710 tp->nvram_pagesize = 256;
12715 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12718 case FLASH_5761VENDOR_ATMEL_ADB161D:
12719 case FLASH_5761VENDOR_ATMEL_MDB161D:
12720 case FLASH_5761VENDOR_ST_A_M45PE16:
12721 case FLASH_5761VENDOR_ST_M_M45PE16:
12722 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12724 case FLASH_5761VENDOR_ATMEL_ADB081D:
12725 case FLASH_5761VENDOR_ATMEL_MDB081D:
12726 case FLASH_5761VENDOR_ST_A_M45PE80:
12727 case FLASH_5761VENDOR_ST_M_M45PE80:
12728 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12730 case FLASH_5761VENDOR_ATMEL_ADB041D:
12731 case FLASH_5761VENDOR_ATMEL_MDB041D:
12732 case FLASH_5761VENDOR_ST_A_M45PE40:
12733 case FLASH_5761VENDOR_ST_M_M45PE40:
12734 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12736 case FLASH_5761VENDOR_ATMEL_ADB021D:
12737 case FLASH_5761VENDOR_ATMEL_MDB021D:
12738 case FLASH_5761VENDOR_ST_A_M45PE20:
12739 case FLASH_5761VENDOR_ST_M_M45PE20:
12740 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12746 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12748 tp->nvram_jedecnum = JEDEC_ATMEL;
12749 tg3_flag_set(tp, NVRAM_BUFFERED);
12750 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12753 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12757 nvcfg1 = tr32(NVRAM_CFG1);
12759 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12760 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12761 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12762 tp->nvram_jedecnum = JEDEC_ATMEL;
12763 tg3_flag_set(tp, NVRAM_BUFFERED);
12764 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12766 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12767 tw32(NVRAM_CFG1, nvcfg1);
12769 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12770 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12771 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12772 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12773 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12774 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12775 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12776 tp->nvram_jedecnum = JEDEC_ATMEL;
12777 tg3_flag_set(tp, NVRAM_BUFFERED);
12778 tg3_flag_set(tp, FLASH);
12780 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12781 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12782 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12783 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12784 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12786 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12787 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12788 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12790 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12791 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12792 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12796 case FLASH_5752VENDOR_ST_M45PE10:
12797 case FLASH_5752VENDOR_ST_M45PE20:
12798 case FLASH_5752VENDOR_ST_M45PE40:
12799 tp->nvram_jedecnum = JEDEC_ST;
12800 tg3_flag_set(tp, NVRAM_BUFFERED);
12801 tg3_flag_set(tp, FLASH);
12803 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12804 case FLASH_5752VENDOR_ST_M45PE10:
12805 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12807 case FLASH_5752VENDOR_ST_M45PE20:
12808 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12810 case FLASH_5752VENDOR_ST_M45PE40:
12811 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12816 tg3_flag_set(tp, NO_NVRAM);
12820 tg3_nvram_get_pagesize(tp, nvcfg1);
12821 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12822 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12826 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12830 nvcfg1 = tr32(NVRAM_CFG1);
12832 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12833 case FLASH_5717VENDOR_ATMEL_EEPROM:
12834 case FLASH_5717VENDOR_MICRO_EEPROM:
12835 tp->nvram_jedecnum = JEDEC_ATMEL;
12836 tg3_flag_set(tp, NVRAM_BUFFERED);
12837 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12839 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12840 tw32(NVRAM_CFG1, nvcfg1);
12842 case FLASH_5717VENDOR_ATMEL_MDB011D:
12843 case FLASH_5717VENDOR_ATMEL_ADB011B:
12844 case FLASH_5717VENDOR_ATMEL_ADB011D:
12845 case FLASH_5717VENDOR_ATMEL_MDB021D:
12846 case FLASH_5717VENDOR_ATMEL_ADB021B:
12847 case FLASH_5717VENDOR_ATMEL_ADB021D:
12848 case FLASH_5717VENDOR_ATMEL_45USPT:
12849 tp->nvram_jedecnum = JEDEC_ATMEL;
12850 tg3_flag_set(tp, NVRAM_BUFFERED);
12851 tg3_flag_set(tp, FLASH);
12853 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854 case FLASH_5717VENDOR_ATMEL_MDB021D:
12855 /* Detect size with tg3_nvram_get_size() */
12857 case FLASH_5717VENDOR_ATMEL_ADB021B:
12858 case FLASH_5717VENDOR_ATMEL_ADB021D:
12859 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12862 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12866 case FLASH_5717VENDOR_ST_M_M25PE10:
12867 case FLASH_5717VENDOR_ST_A_M25PE10:
12868 case FLASH_5717VENDOR_ST_M_M45PE10:
12869 case FLASH_5717VENDOR_ST_A_M45PE10:
12870 case FLASH_5717VENDOR_ST_M_M25PE20:
12871 case FLASH_5717VENDOR_ST_A_M25PE20:
12872 case FLASH_5717VENDOR_ST_M_M45PE20:
12873 case FLASH_5717VENDOR_ST_A_M45PE20:
12874 case FLASH_5717VENDOR_ST_25USPT:
12875 case FLASH_5717VENDOR_ST_45USPT:
12876 tp->nvram_jedecnum = JEDEC_ST;
12877 tg3_flag_set(tp, NVRAM_BUFFERED);
12878 tg3_flag_set(tp, FLASH);
12880 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12881 case FLASH_5717VENDOR_ST_M_M25PE20:
12882 case FLASH_5717VENDOR_ST_M_M45PE20:
12883 /* Detect size with tg3_nvram_get_size() */
12885 case FLASH_5717VENDOR_ST_A_M25PE20:
12886 case FLASH_5717VENDOR_ST_A_M45PE20:
12887 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12890 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12895 tg3_flag_set(tp, NO_NVRAM);
12899 tg3_nvram_get_pagesize(tp, nvcfg1);
12900 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12901 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12904 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12906 u32 nvcfg1, nvmpinstrp;
12908 nvcfg1 = tr32(NVRAM_CFG1);
12909 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12911 switch (nvmpinstrp) {
12912 case FLASH_5720_EEPROM_HD:
12913 case FLASH_5720_EEPROM_LD:
12914 tp->nvram_jedecnum = JEDEC_ATMEL;
12915 tg3_flag_set(tp, NVRAM_BUFFERED);
12917 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12918 tw32(NVRAM_CFG1, nvcfg1);
12919 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12920 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12922 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12924 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12925 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12926 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12927 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12928 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12929 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12930 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12931 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12932 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12933 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12934 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12935 case FLASH_5720VENDOR_ATMEL_45USPT:
12936 tp->nvram_jedecnum = JEDEC_ATMEL;
12937 tg3_flag_set(tp, NVRAM_BUFFERED);
12938 tg3_flag_set(tp, FLASH);
12940 switch (nvmpinstrp) {
12941 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12942 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12943 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12944 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12946 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12947 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12948 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12949 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12951 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12952 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12953 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12956 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12960 case FLASH_5720VENDOR_M_ST_M25PE10:
12961 case FLASH_5720VENDOR_M_ST_M45PE10:
12962 case FLASH_5720VENDOR_A_ST_M25PE10:
12963 case FLASH_5720VENDOR_A_ST_M45PE10:
12964 case FLASH_5720VENDOR_M_ST_M25PE20:
12965 case FLASH_5720VENDOR_M_ST_M45PE20:
12966 case FLASH_5720VENDOR_A_ST_M25PE20:
12967 case FLASH_5720VENDOR_A_ST_M45PE20:
12968 case FLASH_5720VENDOR_M_ST_M25PE40:
12969 case FLASH_5720VENDOR_M_ST_M45PE40:
12970 case FLASH_5720VENDOR_A_ST_M25PE40:
12971 case FLASH_5720VENDOR_A_ST_M45PE40:
12972 case FLASH_5720VENDOR_M_ST_M25PE80:
12973 case FLASH_5720VENDOR_M_ST_M45PE80:
12974 case FLASH_5720VENDOR_A_ST_M25PE80:
12975 case FLASH_5720VENDOR_A_ST_M45PE80:
12976 case FLASH_5720VENDOR_ST_25USPT:
12977 case FLASH_5720VENDOR_ST_45USPT:
12978 tp->nvram_jedecnum = JEDEC_ST;
12979 tg3_flag_set(tp, NVRAM_BUFFERED);
12980 tg3_flag_set(tp, FLASH);
12982 switch (nvmpinstrp) {
12983 case FLASH_5720VENDOR_M_ST_M25PE20:
12984 case FLASH_5720VENDOR_M_ST_M45PE20:
12985 case FLASH_5720VENDOR_A_ST_M25PE20:
12986 case FLASH_5720VENDOR_A_ST_M45PE20:
12987 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12989 case FLASH_5720VENDOR_M_ST_M25PE40:
12990 case FLASH_5720VENDOR_M_ST_M45PE40:
12991 case FLASH_5720VENDOR_A_ST_M25PE40:
12992 case FLASH_5720VENDOR_A_ST_M45PE40:
12993 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12995 case FLASH_5720VENDOR_M_ST_M25PE80:
12996 case FLASH_5720VENDOR_M_ST_M45PE80:
12997 case FLASH_5720VENDOR_A_ST_M25PE80:
12998 case FLASH_5720VENDOR_A_ST_M45PE80:
12999 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13002 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13007 tg3_flag_set(tp, NO_NVRAM);
13011 tg3_nvram_get_pagesize(tp, nvcfg1);
13012 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13013 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
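/* All of the per-ASIC *_nvram_info() helpers above follow the same
 * pattern: decode the vendor/strapping bits in NVRAM_CFG1 into a JEDEC
 * vendor id, buffered and EEPROM-vs-flash flags, a page size and (where
 * encoded) a total size.  Atmel DataFlash page sizes of 264 or 528
 * bytes keep the NVRAM address translation; every other page size sets
 * NO_NVRAM_ADDR_TRANS.
 */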
13016 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13017 static void __devinit tg3_nvram_init(struct tg3 *tp)
13019 tw32_f(GRC_EEPROM_ADDR,
13020 (EEPROM_ADDR_FSM_RESET |
13021 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13022 EEPROM_ADDR_CLKPERD_SHIFT)));
13026 /* Enable seeprom accesses. */
13027 tw32_f(GRC_LOCAL_CTRL,
13028 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13031 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13032 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13033 tg3_flag_set(tp, NVRAM);
13035 if (tg3_nvram_lock(tp)) {
13036 netdev_warn(tp->dev,
13037 "Cannot get nvram lock, %s failed\n",
13041 tg3_enable_nvram_access(tp);
13043 tp->nvram_size = 0;
13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13046 tg3_get_5752_nvram_info(tp);
13047 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13048 tg3_get_5755_nvram_info(tp);
13049 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13052 tg3_get_5787_nvram_info(tp);
13053 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13054 tg3_get_5761_nvram_info(tp);
13055 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13056 tg3_get_5906_nvram_info(tp);
13057 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13058 tg3_flag(tp, 57765_CLASS))
13059 tg3_get_57780_nvram_info(tp);
13060 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13062 tg3_get_5717_nvram_info(tp);
13063 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13064 tg3_get_5720_nvram_info(tp);
13066 tg3_get_nvram_info(tp);
13068 if (tp->nvram_size == 0)
13069 tg3_get_nvram_size(tp);
13071 tg3_disable_nvram_access(tp);
13072 tg3_nvram_unlock(tp);
13075 tg3_flag_clear(tp, NVRAM);
13076 tg3_flag_clear(tp, NVRAM_BUFFERED);
13078 tg3_get_eeprom_size(tp);
13082 struct subsys_tbl_ent {
13083 u16 subsys_vendor, subsys_devid;
13087 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13088 /* Broadcom boards. */
13089 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13090 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13091 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13092 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13093 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13094 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13095 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13096 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13097 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13098 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13099 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13100 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13101 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13102 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13103 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13104 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13105 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13106 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13107 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13108 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13109 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13110 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13113 { TG3PCI_SUBVENDOR_ID_3COM,
13114 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13115 { TG3PCI_SUBVENDOR_ID_3COM,
13116 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13117 { TG3PCI_SUBVENDOR_ID_3COM,
13118 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13119 { TG3PCI_SUBVENDOR_ID_3COM,
13120 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13121 { TG3PCI_SUBVENDOR_ID_3COM,
13122 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13125 { TG3PCI_SUBVENDOR_ID_DELL,
13126 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13127 { TG3PCI_SUBVENDOR_ID_DELL,
13128 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13129 { TG3PCI_SUBVENDOR_ID_DELL,
13130 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13131 { TG3PCI_SUBVENDOR_ID_DELL,
13132 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13134 /* Compaq boards. */
13135 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13136 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13137 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13138 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13139 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13140 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13141 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13142 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13143 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13144 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13147 { TG3PCI_SUBVENDOR_ID_IBM,
13148 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13151 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13155 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13156 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13157 tp->pdev->subsystem_vendor) &&
13158 (subsys_id_to_phy_id[i].subsys_devid ==
13159 tp->pdev->subsystem_device))
13160 return &subsys_id_to_phy_id[i];
13165 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13169 tp->phy_id = TG3_PHY_ID_INVALID;
13170 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13172 /* Assume an onboard device and WOL capable by default. */
13173 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13174 tg3_flag_set(tp, WOL_CAP);
13176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13177 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13178 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13179 tg3_flag_set(tp, IS_NIC);
13181 val = tr32(VCPU_CFGSHDW);
13182 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13183 tg3_flag_set(tp, ASPM_WORKAROUND);
13184 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13185 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13186 tg3_flag_set(tp, WOL_ENABLE);
13187 device_set_wakeup_enable(&tp->pdev->dev, true);
13192 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13193 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13194 u32 nic_cfg, led_cfg;
13195 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13196 int eeprom_phy_serdes = 0;
13198 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13199 tp->nic_sram_data_cfg = nic_cfg;
13201 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13202 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13203 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13204 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13206 (ver > 0) && (ver < 0x100))
13207 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13210 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13212 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13213 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13214 eeprom_phy_serdes = 1;
13216 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13217 if (nic_phy_id != 0) {
13218 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13219 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13221 eeprom_phy_id = (id1 >> 16) << 10;
13222 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13223 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13227 tp->phy_id = eeprom_phy_id;
13228 if (eeprom_phy_serdes) {
13229 if (!tg3_flag(tp, 5705_PLUS))
13230 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13232 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13235 if (tg3_flag(tp, 5750_PLUS))
13236 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13237 SHASTA_EXT_LED_MODE_MASK);
13239 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13243 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13244 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13247 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13248 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13251 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13252 tp->led_ctrl = LED_CTRL_MODE_MAC;
13254 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13255 * read on some older 5700/5701 bootcode.
13257 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13259 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13261 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13265 case SHASTA_EXT_LED_SHARED:
13266 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13267 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13268 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13269 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13270 LED_CTRL_MODE_PHY_2);
13273 case SHASTA_EXT_LED_MAC:
13274 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13277 case SHASTA_EXT_LED_COMBO:
13278 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13279 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13280 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13281 LED_CTRL_MODE_PHY_2);
13286 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13288 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13289 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13291 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13292 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13294 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13295 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13296 if ((tp->pdev->subsystem_vendor ==
13297 PCI_VENDOR_ID_ARIMA) &&
13298 (tp->pdev->subsystem_device == 0x205a ||
13299 tp->pdev->subsystem_device == 0x2063))
13300 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13302 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13303 tg3_flag_set(tp, IS_NIC);
13306 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13307 tg3_flag_set(tp, ENABLE_ASF);
13308 if (tg3_flag(tp, 5750_PLUS))
13309 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13312 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13313 tg3_flag(tp, 5750_PLUS))
13314 tg3_flag_set(tp, ENABLE_APE);
13316 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13317 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13318 tg3_flag_clear(tp, WOL_CAP);
13320 if (tg3_flag(tp, WOL_CAP) &&
13321 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13322 tg3_flag_set(tp, WOL_ENABLE);
13323 device_set_wakeup_enable(&tp->pdev->dev, true);
13326 if (cfg2 & (1 << 17))
13327 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13329 /* Serdes signal pre-emphasis in register 0x590 is set by the
13330 * bootcode if bit 18 is set. */
13331 if (cfg2 & (1 << 18))
13332 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13334 if ((tg3_flag(tp, 57765_PLUS) ||
13335 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13336 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13337 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13338 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13340 if (tg3_flag(tp, PCI_EXPRESS) &&
13341 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13342 !tg3_flag(tp, 57765_PLUS)) {
13345 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13346 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13347 tg3_flag_set(tp, ASPM_WORKAROUND);
13350 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13351 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13352 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13353 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13354 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13355 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13358 if (tg3_flag(tp, WOL_CAP))
13359 device_set_wakeup_enable(&tp->pdev->dev,
13360 tg3_flag(tp, WOL_ENABLE));
13362 device_set_wakeup_capable(&tp->pdev->dev, false);
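/* tg3_get_eeprom_hw_cfg() above seeds the PHY id, LED mode, WoL, ASF/APE
 * and various workaround flags from the configuration block the bootcode
 * leaves in NIC SRAM (valid only when NIC_SRAM_DATA_SIG holds the magic
 * signature); when the signature is missing, the defaults set at the top
 * of the function remain in effect.
 */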
13365 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13370 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13371 tw32(OTP_CTRL, cmd);
13373 /* Wait for up to 1 ms for command to execute. */
13374 for (i = 0; i < 100; i++) {
13375 val = tr32(OTP_STATUS);
13376 if (val & OTP_STATUS_CMD_DONE)
13381 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13384 /* Read the gphy configuration from the OTP region of the chip. The gphy
13385 * configuration is a 32-bit value that straddles the alignment boundary.
13386 * We do two 32-bit reads and then shift and merge the results.
13388 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13390 u32 bhalf_otp, thalf_otp;
13392 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13394 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13397 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13399 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13402 thalf_otp = tr32(OTP_READ_DATA);
13404 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13406 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13409 bhalf_otp = tr32(OTP_READ_DATA);
13411 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
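/* The gphy config word therefore straddles two consecutive OTP words:
 * its upper 16 bits sit in the low half of the word at
 * OTP_ADDRESS_MAGIC1 and its lower 16 bits in the high half of the word
 * at OTP_ADDRESS_MAGIC2, which the return statement above stitches back
 * together.
 */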
13414 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13416 u32 adv = ADVERTISED_Autoneg;
13418 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13419 adv |= ADVERTISED_1000baseT_Half |
13420 ADVERTISED_1000baseT_Full;
13422 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13423 adv |= ADVERTISED_100baseT_Half |
13424 ADVERTISED_100baseT_Full |
13425 ADVERTISED_10baseT_Half |
13426 ADVERTISED_10baseT_Full |
13429 adv |= ADVERTISED_FIBRE;
13431 tp->link_config.advertising = adv;
13432 tp->link_config.speed = SPEED_UNKNOWN;
13433 tp->link_config.duplex = DUPLEX_UNKNOWN;
13434 tp->link_config.autoneg = AUTONEG_ENABLE;
13435 tp->link_config.active_speed = SPEED_UNKNOWN;
13436 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13441 static int __devinit tg3_phy_probe(struct tg3 *tp)
13443 u32 hw_phy_id_1, hw_phy_id_2;
13444 u32 hw_phy_id, hw_phy_id_masked;
13447 /* flow control autonegotiation is default behavior */
13448 tg3_flag_set(tp, PAUSE_AUTONEG);
13449 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13451 if (tg3_flag(tp, USE_PHYLIB))
13452 return tg3_phy_init(tp);
13454 /* Reading the PHY ID register can conflict with ASF
13455 * firmware access to the PHY hardware.
13458 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13459 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13461 /* Now read the physical PHY_ID from the chip and verify
13462 * that it is sane. If it doesn't look good, we fall back
13463 * first to the PHY_ID recorded in the eeprom area and,
13464 * failing that, to the hard-coded subsystem-ID table.
13466 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13467 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13469 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13470 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13471 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13473 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
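/* MII_PHYSID1/MII_PHYSID2 are repacked here into the driver's internal
 * TG3_PHY_ID_* layout; masking with TG3_PHY_ID_MASK drops the low
 * revision bits so TG3_KNOWN_PHY_ID() matches a PHY family regardless
 * of silicon revision.
 */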
13476 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13477 tp->phy_id = hw_phy_id;
13478 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13479 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13481 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13483 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13484 /* Do nothing, phy ID already set up in
13485 * tg3_get_eeprom_hw_cfg().
13488 struct subsys_tbl_ent *p;
13490 /* No eeprom signature? Try the hardcoded
13491 * subsys device table.
13493 p = tg3_lookup_by_subsys(tp);
13497 tp->phy_id = p->phy_id;
13499 tp->phy_id == TG3_PHY_ID_BCM8002)
13500 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13504 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13505 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13507 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13508 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13509 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13510 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13511 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13513 tg3_phy_init_link_config(tp);
13515 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13516 !tg3_flag(tp, ENABLE_APE) &&
13517 !tg3_flag(tp, ENABLE_ASF)) {
13520 tg3_readphy(tp, MII_BMSR, &bmsr);
13521 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13522 (bmsr & BMSR_LSTATUS))
13523 goto skip_phy_reset;
13525 err = tg3_phy_reset(tp);
13529 tg3_phy_set_wirespeed(tp);
13531 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13532 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13533 tp->link_config.flowctrl);
13535 tg3_writephy(tp, MII_BMCR,
13536 BMCR_ANENABLE | BMCR_ANRESTART);
13541 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13542 err = tg3_init_5401phy_dsp(tp);
13546 err = tg3_init_5401phy_dsp(tp);
13552 static void __devinit tg3_read_vpd(struct tg3 *tp)
13555 unsigned int block_end, rosize, len;
13559 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13563 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13565 goto out_not_found;
13567 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13568 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13569 i += PCI_VPD_LRDT_TAG_SIZE;
13571 if (block_end > vpdlen)
13572 goto out_not_found;
13574 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13575 PCI_VPD_RO_KEYWORD_MFR_ID);
13577 len = pci_vpd_info_field_size(&vpd_data[j]);
13579 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13580 if (j + len > block_end || len != 4 ||
13581 memcmp(&vpd_data[j], "1028", 4))
13584 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13585 PCI_VPD_RO_KEYWORD_VENDOR0);
13589 len = pci_vpd_info_field_size(&vpd_data[j]);
13591 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13592 if (j + len > block_end)
13595 memcpy(tp->fw_ver, &vpd_data[j], len);
13596 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13600 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13601 PCI_VPD_RO_KEYWORD_PARTNO);
13603 goto out_not_found;
13605 len = pci_vpd_info_field_size(&vpd_data[i]);
13607 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13608 if (len > TG3_BPN_SIZE ||
13609 (len + i) > vpdlen)
13610 goto out_not_found;
13612 memcpy(tp->board_part_number, &vpd_data[i], len);
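/* The VPD read-only section is scanned for the manufacturer id (Dell
 * boards, "1028", additionally supply a bootcode version string via the
 * VENDOR0 keyword) and for the part-number keyword; if no usable VPD is
 * found, the code below falls back to hard-coded board names keyed off
 * the PCI device id.
 */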
13616 if (tp->board_part_number[0])
13620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13621 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13622 strcpy(tp->board_part_number, "BCM5717");
13623 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13624 strcpy(tp->board_part_number, "BCM5718");
13627 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13628 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13629 strcpy(tp->board_part_number, "BCM57780");
13630 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13631 strcpy(tp->board_part_number, "BCM57760");
13632 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13633 strcpy(tp->board_part_number, "BCM57790");
13634 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13635 strcpy(tp->board_part_number, "BCM57788");
13638 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13639 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13640 strcpy(tp->board_part_number, "BCM57761");
13641 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13642 strcpy(tp->board_part_number, "BCM57765");
13643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13644 strcpy(tp->board_part_number, "BCM57781");
13645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13646 strcpy(tp->board_part_number, "BCM57785");
13647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13648 strcpy(tp->board_part_number, "BCM57791");
13649 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13650 strcpy(tp->board_part_number, "BCM57795");
13653 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13654 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13655 strcpy(tp->board_part_number, "BCM57762");
13656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13657 strcpy(tp->board_part_number, "BCM57766");
13658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13659 strcpy(tp->board_part_number, "BCM57782");
13660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13661 strcpy(tp->board_part_number, "BCM57786");
13664 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13665 strcpy(tp->board_part_number, "BCM95906");
13668 strcpy(tp->board_part_number, "none");
13672 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13676 if (tg3_nvram_read(tp, offset, &val) ||
13677 (val & 0xfc000000) != 0x0c000000 ||
13678 tg3_nvram_read(tp, offset + 4, &val) ||
13685 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13687 u32 val, offset, start, ver_offset;
13689 bool newver = false;
13691 if (tg3_nvram_read(tp, 0xc, &offset) ||
13692 tg3_nvram_read(tp, 0x4, &start))
13695 offset = tg3_nvram_logical_addr(tp, offset);
13697 if (tg3_nvram_read(tp, offset, &val))
13700 if ((val & 0xfc000000) == 0x0c000000) {
13701 if (tg3_nvram_read(tp, offset + 4, &val))
13708 dst_off = strlen(tp->fw_ver);
13711 if (TG3_VER_SIZE - dst_off < 16 ||
13712 tg3_nvram_read(tp, offset + 8, &ver_offset))
13715 offset = offset + ver_offset - start;
13716 for (i = 0; i < 16; i += 4) {
13718 if (tg3_nvram_read_be32(tp, offset + i, &v))
13721 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13726 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13729 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13730 TG3_NVM_BCVER_MAJSFT;
13731 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13732 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13733 "v%d.%02d", major, minor);
13737 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13739 u32 val, major, minor;
13741 /* Use native endian representation */
13742 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13745 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13746 TG3_NVM_HWSB_CFG1_MAJSFT;
13747 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13748 TG3_NVM_HWSB_CFG1_MINSFT;
13750 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
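/* EEPROM self-boot image: pick the EDH word for this image revision and decode build/major/minor from it. */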
13753 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13755 u32 offset, major, minor, build;
13757 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13759 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13762 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13763 case TG3_EEPROM_SB_REVISION_0:
13764 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13766 case TG3_EEPROM_SB_REVISION_2:
13767 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13769 case TG3_EEPROM_SB_REVISION_3:
13770 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13772 case TG3_EEPROM_SB_REVISION_4:
13773 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13775 case TG3_EEPROM_SB_REVISION_5:
13776 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13778 case TG3_EEPROM_SB_REVISION_6:
13779 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13785 if (tg3_nvram_read(tp, offset, &val))
13788 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13789 TG3_EEPROM_SB_EDH_BLD_SHFT;
13790 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13791 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13792 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13794 if (minor > 99 || build > 26)
13797 offset = strlen(tp->fw_ver);
13798 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13799 " v%d.%02d", major, minor);
13802 offset = strlen(tp->fw_ver);
13803 if (offset < TG3_VER_SIZE - 1)
13804 tp->fw_ver[offset] = 'a' + build - 1;
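/* ASF management firmware: locate its image through the NVRAM directory entry and append its version words to fw_ver. */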
13808 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13810 u32 val, offset, start;
13813 for (offset = TG3_NVM_DIR_START;
13814 offset < TG3_NVM_DIR_END;
13815 offset += TG3_NVM_DIRENT_SIZE) {
13816 if (tg3_nvram_read(tp, offset, &val))
13819 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13823 if (offset == TG3_NVM_DIR_END)
13826 if (!tg3_flag(tp, 5705_PLUS))
13827 start = 0x08000000;
13828 else if (tg3_nvram_read(tp, offset - 4, &start))
13831 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13832 !tg3_fw_img_is_valid(tp, offset) ||
13833 tg3_nvram_read(tp, offset + 8, &val))
13836 offset += val - start;
13838 vlen = strlen(tp->fw_ver);
13840 tp->fw_ver[vlen++] = ',';
13841 tp->fw_ver[vlen++] = ' ';
13843 for (i = 0; i < 4; i++) {
13845 if (tg3_nvram_read_be32(tp, offset, &v))
13848 offset += sizeof(v);
13850 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13851 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13855 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
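/* APE management firmware (DASH, or NCSI when the firmware advertises it): the version is read from APE shared memory. */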
13860 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13866 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13869 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13870 if (apedata != APE_SEG_SIG_MAGIC)
13873 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13874 if (!(apedata & APE_FW_STATUS_READY))
13877 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13879 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13880 tg3_flag_set(tp, APE_HAS_NCSI);
13886 vlen = strlen(tp->fw_ver);
13888 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13890 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13891 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13892 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13893 (apedata & APE_FW_VERSION_BLDMSK));
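/* Assemble tp->fw_ver from what is available: the VPD version (if already read), the NVRAM bootcode or self-boot version, and the ASF or APE management firmware version. */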
13896 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13899 bool vpd_vers = false;
13901 if (tp->fw_ver[0] != 0)
13904 if (tg3_flag(tp, NO_NVRAM)) {
13905 strcat(tp->fw_ver, "sb");
13909 if (tg3_nvram_read(tp, 0, &val))
13912 if (val == TG3_EEPROM_MAGIC)
13913 tg3_read_bc_ver(tp);
13914 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13915 tg3_read_sb_ver(tp, val);
13916 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13917 tg3_read_hwsb_ver(tp);
13924 if (tg3_flag(tp, ENABLE_APE)) {
13925 if (tg3_flag(tp, ENABLE_ASF))
13926 tg3_read_dash_ver(tp);
13927 } else if (tg3_flag(tp, ENABLE_ASF)) {
13928 tg3_read_mgmtfw_ver(tp);
13932 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13935 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13937 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13938 return TG3_RX_RET_MAX_SIZE_5717;
13939 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13940 return TG3_RX_RET_MAX_SIZE_5700;
13942 return TG3_RX_RET_MAX_SIZE_5705;
13945 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13946 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13947 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13948 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13952 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13954 struct pci_dev *peer;
13955 unsigned int func, devnr = tp->pdev->devfn & ~7;
13957 for (func = 0; func < 8; func++) {
13958 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13959 if (peer && peer != tp->pdev)
13963 /* 5704 can be configured in single-port mode, set peer to
13964 * tp->pdev in that case.
13972 * We don't need to keep the refcount elevated; there's no way
13973 * to remove one half of this device without removing the other
13980 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13982 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13986 /* All devices that use the alternate
13987 * ASIC REV location have a CPMU.
13989 tg3_flag_set(tp, CPMU_PRESENT);
13991 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13992 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13993 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13994 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13995 reg = TG3PCI_GEN2_PRODID_ASICREV;
13996 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13997 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13998 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13999 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14000 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14001 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14002 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14003 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14004 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14005 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14006 reg = TG3PCI_GEN15_PRODID_ASICREV;
14008 reg = TG3PCI_PRODID_ASICREV;
14010 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14013 /* Wrong chip ID in 5752 A0. This code can be removed later
14014 * as A0 is not in production.
14016 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14017 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14019 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14022 tg3_flag_set(tp, 5717_PLUS);
14024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14026 tg3_flag_set(tp, 57765_CLASS);
14028 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14029 tg3_flag_set(tp, 57765_PLUS);
14031 /* Intentionally exclude ASIC_REV_5906 */
14032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14038 tg3_flag(tp, 57765_PLUS))
14039 tg3_flag_set(tp, 5755_PLUS);
14041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14043 tg3_flag_set(tp, 5780_CLASS);
14045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14048 tg3_flag(tp, 5755_PLUS) ||
14049 tg3_flag(tp, 5780_CLASS))
14050 tg3_flag_set(tp, 5750_PLUS);
14052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14053 tg3_flag(tp, 5750_PLUS))
14054 tg3_flag_set(tp, 5705_PLUS);
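/* Probe-time discovery of bus type, chip errata workarounds, register access methods, DMA limits and PHY quirks. */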
14057 static int __devinit tg3_get_invariants(struct tg3 *tp)
14060 u32 pci_state_reg, grc_misc_cfg;
14065 /* Force memory write invalidate off. If we leave it on,
14066 * then on 5700_BX chips we have to enable a workaround.
14067 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14068 * to match the cacheline size. The Broadcom driver has this
14069 * workaround but turns MWI off all the time, so it never uses
14070 * it. This seems to suggest that the workaround is insufficient.
14072 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14073 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14074 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14076 /* Important! -- Make sure register accesses are byteswapped
14077 * correctly. Also, for those chips that require it, make
14078 * sure that indirect register accesses are enabled before
14079 * the first operation.
14081 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14083 tp->misc_host_ctrl |= (misc_ctrl_reg &
14084 MISC_HOST_CTRL_CHIPREV);
14085 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14086 tp->misc_host_ctrl);
14088 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14090 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14091 * we need to disable memory and use config. cycles
14092 * only to access all registers. The 5702/03 chips
14093 * can mistakenly decode the special cycles from the
14094 * ICH chipsets as memory write cycles, causing corruption
14095 * of register and memory space. Only certain ICH bridges
14096 * will drive special cycles with non-zero data during the
14097 * address phase which can fall within the 5703's address
14098 * range. This is not an ICH bug as the PCI spec allows
14099 * non-zero address during special cycles. However, only
14100 * these ICH bridges are known to drive non-zero addresses
14101 * during special cycles.
14103 * Since special cycles do not cross PCI bridges, we only
14104 * enable this workaround if the 5703 is on the secondary
14105 * bus of these ICH bridges.
14107 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14108 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14109 static struct tg3_dev_id {
14113 } ich_chipsets[] = {
14114 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14116 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14118 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14120 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14124 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14125 struct pci_dev *bridge = NULL;
14127 while (pci_id->vendor != 0) {
14128 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14134 if (pci_id->rev != PCI_ANY_ID) {
14135 if (bridge->revision > pci_id->rev)
14138 if (bridge->subordinate &&
14139 (bridge->subordinate->number ==
14140 tp->pdev->bus->number)) {
14141 tg3_flag_set(tp, ICH_WORKAROUND);
14142 pci_dev_put(bridge);
14148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14149 static struct tg3_dev_id {
14152 } bridge_chipsets[] = {
14153 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14154 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14157 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14158 struct pci_dev *bridge = NULL;
14160 while (pci_id->vendor != 0) {
14161 bridge = pci_get_device(pci_id->vendor,
14168 if (bridge->subordinate &&
14169 (bridge->subordinate->number <=
14170 tp->pdev->bus->number) &&
14171 (bridge->subordinate->subordinate >=
14172 tp->pdev->bus->number)) {
14173 tg3_flag_set(tp, 5701_DMA_BUG);
14174 pci_dev_put(bridge);
14180 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14181 * DMA addresses > 40-bit. This bridge may have other
14182 * 57xx devices behind it in some 4-port NIC designs, for example.
14183 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
14186 if (tg3_flag(tp, 5780_CLASS)) {
14187 tg3_flag_set(tp, 40BIT_DMA_BUG);
14188 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14190 struct pci_dev *bridge = NULL;
14193 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14194 PCI_DEVICE_ID_SERVERWORKS_EPB,
14196 if (bridge && bridge->subordinate &&
14197 (bridge->subordinate->number <=
14198 tp->pdev->bus->number) &&
14199 (bridge->subordinate->subordinate >=
14200 tp->pdev->bus->number)) {
14201 tg3_flag_set(tp, 40BIT_DMA_BUG);
14202 pci_dev_put(bridge);
14208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14210 tp->pdev_peer = tg3_find_peer(tp);
14212 /* Determine TSO capabilities */
14213 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14214 ; /* Do nothing. HW bug. */
14215 else if (tg3_flag(tp, 57765_PLUS))
14216 tg3_flag_set(tp, HW_TSO_3);
14217 else if (tg3_flag(tp, 5755_PLUS) ||
14218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14219 tg3_flag_set(tp, HW_TSO_2);
14220 else if (tg3_flag(tp, 5750_PLUS)) {
14221 tg3_flag_set(tp, HW_TSO_1);
14222 tg3_flag_set(tp, TSO_BUG);
14223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14224 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14225 tg3_flag_clear(tp, TSO_BUG);
14226 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14227 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14228 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14229 tg3_flag_set(tp, TSO_BUG);
14230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14231 tp->fw_needed = FIRMWARE_TG3TSO5;
14233 tp->fw_needed = FIRMWARE_TG3TSO;
14236 /* Selectively allow TSO based on operating conditions */
14237 if (tg3_flag(tp, HW_TSO_1) ||
14238 tg3_flag(tp, HW_TSO_2) ||
14239 tg3_flag(tp, HW_TSO_3) ||
14241 /* For firmware TSO, assume ASF is disabled.
14242 * We'll disable TSO later if we discover ASF
14243 * is enabled in tg3_get_eeprom_hw_cfg().
14245 tg3_flag_set(tp, TSO_CAPABLE);
14247 tg3_flag_clear(tp, TSO_CAPABLE);
14248 tg3_flag_clear(tp, TSO_BUG);
14249 tp->fw_needed = NULL;
14252 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14253 tp->fw_needed = FIRMWARE_TG3;
14257 if (tg3_flag(tp, 5750_PLUS)) {
14258 tg3_flag_set(tp, SUPPORT_MSI);
14259 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14260 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14261 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14262 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14263 tp->pdev_peer == tp->pdev))
14264 tg3_flag_clear(tp, SUPPORT_MSI);
14266 if (tg3_flag(tp, 5755_PLUS) ||
14267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14268 tg3_flag_set(tp, 1SHOT_MSI);
14271 if (tg3_flag(tp, 57765_PLUS)) {
14272 tg3_flag_set(tp, SUPPORT_MSIX);
14273 tp->irq_max = TG3_IRQ_MAX_VECS;
14274 tg3_rss_init_dflt_indir_tbl(tp);
14278 if (tg3_flag(tp, 5755_PLUS))
14279 tg3_flag_set(tp, SHORT_DMA_BUG);
14281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14282 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14287 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14289 if (tg3_flag(tp, 57765_PLUS) &&
14290 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14291 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14293 if (!tg3_flag(tp, 5705_PLUS) ||
14294 tg3_flag(tp, 5780_CLASS) ||
14295 tg3_flag(tp, USE_JUMBO_BDFLAG))
14296 tg3_flag_set(tp, JUMBO_CAPABLE);
14298 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14301 if (pci_is_pcie(tp->pdev)) {
14304 tg3_flag_set(tp, PCI_EXPRESS);
14306 pci_read_config_word(tp->pdev,
14307 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14309 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14310 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14312 tg3_flag_clear(tp, HW_TSO_2);
14313 tg3_flag_clear(tp, TSO_CAPABLE);
14315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14317 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14318 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14319 tg3_flag_set(tp, CLKREQ_BUG);
14320 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14321 tg3_flag_set(tp, L1PLLPD_EN);
14323 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14324 /* BCM5785 devices are effectively PCIe devices, and should
14325 * follow PCIe codepaths, but do not have a PCIe capabilities
14328 tg3_flag_set(tp, PCI_EXPRESS);
14329 } else if (!tg3_flag(tp, 5705_PLUS) ||
14330 tg3_flag(tp, 5780_CLASS)) {
14331 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14332 if (!tp->pcix_cap) {
14333 dev_err(&tp->pdev->dev,
14334 "Cannot find PCI-X capability, aborting\n");
14338 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14339 tg3_flag_set(tp, PCIX_MODE);
14342 /* If we have an AMD 762 or VIA K8T800 chipset, write
14343 * reordering to the mailbox registers done by the host
14344 * controller can cause major troubles. We read back from
14345 * every mailbox register write to force the writes to be
14346 * posted to the chip in order.
14348 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14349 !tg3_flag(tp, PCI_EXPRESS))
14350 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14352 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14353 &tp->pci_cacheline_sz);
14354 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14355 &tp->pci_lat_timer);
14356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14357 tp->pci_lat_timer < 64) {
14358 tp->pci_lat_timer = 64;
14359 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14360 tp->pci_lat_timer);
14363 /* Important! -- It is critical that the PCI-X hw workaround
14364 * situation is decided before the first MMIO register access.
14366 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14367 /* 5700 BX chips need to have their TX producer index
14368 * mailboxes written twice to work around a bug.
14370 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14372 /* If we are in PCI-X mode, enable register write workaround.
14374 * The workaround is to use indirect register accesses
14375 * for all chip writes not to mailbox registers.
14377 if (tg3_flag(tp, PCIX_MODE)) {
14380 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14382 /* The chip can have its power management PCI config
14383 * space registers clobbered due to this bug.
14384 * So explicitly force the chip into D0 here.
14386 pci_read_config_dword(tp->pdev,
14387 tp->pm_cap + PCI_PM_CTRL,
14389 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14390 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14391 pci_write_config_dword(tp->pdev,
14392 tp->pm_cap + PCI_PM_CTRL,
14395 /* Also, force SERR#/PERR# in PCI command. */
14396 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14397 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14398 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14402 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14403 tg3_flag_set(tp, PCI_HIGH_SPEED);
14404 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14405 tg3_flag_set(tp, PCI_32BIT);
14407 /* Chip-specific fixup from Broadcom driver */
14408 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14409 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14410 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14411 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14414 /* Default fast path register access methods */
14415 tp->read32 = tg3_read32;
14416 tp->write32 = tg3_write32;
14417 tp->read32_mbox = tg3_read32;
14418 tp->write32_mbox = tg3_write32;
14419 tp->write32_tx_mbox = tg3_write32;
14420 tp->write32_rx_mbox = tg3_write32;
14422 /* Various workaround register access methods */
14423 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14424 tp->write32 = tg3_write_indirect_reg32;
14425 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14426 (tg3_flag(tp, PCI_EXPRESS) &&
14427 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14429 * Back-to-back register writes can cause problems on these
14430 * chips; the workaround is to read back all reg writes
14431 * except those to mailbox regs.
14433 * See tg3_write_indirect_reg32().
14435 tp->write32 = tg3_write_flush_reg32;
14438 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14439 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14440 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14441 tp->write32_rx_mbox = tg3_write_flush_reg32;
14444 if (tg3_flag(tp, ICH_WORKAROUND)) {
14445 tp->read32 = tg3_read_indirect_reg32;
14446 tp->write32 = tg3_write_indirect_reg32;
14447 tp->read32_mbox = tg3_read_indirect_mbox;
14448 tp->write32_mbox = tg3_write_indirect_mbox;
14449 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14450 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14455 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14456 pci_cmd &= ~PCI_COMMAND_MEMORY;
14457 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14460 tp->read32_mbox = tg3_read32_mbox_5906;
14461 tp->write32_mbox = tg3_write32_mbox_5906;
14462 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14463 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14466 if (tp->write32 == tg3_write_indirect_reg32 ||
14467 (tg3_flag(tp, PCIX_MODE) &&
14468 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14470 tg3_flag_set(tp, SRAM_USE_CONFIG);
14472 /* The memory arbiter has to be enabled in order for SRAM accesses
14473 * to succeed. Normally on powerup the tg3 chip firmware will make
14474 * sure it is enabled, but other entities such as system netboot
14475 * code might disable it.
14477 val = tr32(MEMARB_MODE);
14478 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
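/* Work out which port/function of the package this is. The default comes from the PCI function number; PCI-X parts report it in PCI_X_STATUS, and 5717/5719/5720 in the CPMU status word in SRAM. */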
14480 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14482 tg3_flag(tp, 5780_CLASS)) {
14483 if (tg3_flag(tp, PCIX_MODE)) {
14484 pci_read_config_dword(tp->pdev,
14485 tp->pcix_cap + PCI_X_STATUS,
14487 tp->pci_fn = val & 0x7;
14489 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14490 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14491 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14492 NIC_SRAM_CPMUSTAT_SIG) {
14493 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14494 tp->pci_fn = tp->pci_fn ? 1 : 0;
14496 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14498 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14499 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14500 NIC_SRAM_CPMUSTAT_SIG) {
14501 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14502 TG3_CPMU_STATUS_FSHFT_5719;
14506 /* Get eeprom hw config before calling tg3_set_power_state().
14507 * In particular, the TG3_FLAG_IS_NIC flag must be
14508 * determined before calling tg3_set_power_state() so that
14509 * we know whether or not to switch out of Vaux power.
14510 * When the flag is set, it means that GPIO1 is used for eeprom
14511 * write protect and also implies that it is a LOM where GPIOs
14512 * are not used to switch power.
14514 tg3_get_eeprom_hw_cfg(tp);
14516 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14517 tg3_flag_clear(tp, TSO_CAPABLE);
14518 tg3_flag_clear(tp, TSO_BUG);
14519 tp->fw_needed = NULL;
14522 if (tg3_flag(tp, ENABLE_APE)) {
14523 /* Allow reads and writes to the
14524 * APE register and memory space.
14526 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14527 PCISTATE_ALLOW_APE_SHMEM_WR |
14528 PCISTATE_ALLOW_APE_PSPACE_WR;
14529 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14532 tg3_ape_lock_init(tp);
14535 /* Set up tp->grc_local_ctrl before calling
14536 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14537 * will bring 5700's external PHY out of reset.
14538 * It is also used as eeprom write protect on LOMs.
14540 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14542 tg3_flag(tp, EEPROM_WRITE_PROT))
14543 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14544 GRC_LCLCTRL_GPIO_OUTPUT1);
14545 /* Unused GPIO3 must be driven as output on 5752 because there
14546 * are no pull-up resistors on unused GPIO pins.
14548 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14549 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14553 tg3_flag(tp, 57765_CLASS))
14554 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14556 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14557 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14558 /* Turn off the debug UART. */
14559 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14560 if (tg3_flag(tp, IS_NIC))
14561 /* Keep VMain power. */
14562 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14563 GRC_LCLCTRL_GPIO_OUTPUT0;
14566 /* Switch out of Vaux if it is a NIC */
14567 tg3_pwrsrc_switch_to_vmain(tp);
14569 /* Derive initial jumbo mode from MTU assigned in
14570 * ether_setup() via the alloc_etherdev() call
14572 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14573 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14575 /* Determine WakeOnLan speed to use. */
14576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14577 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14578 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14579 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14580 tg3_flag_clear(tp, WOL_SPEED_100MB);
14582 tg3_flag_set(tp, WOL_SPEED_100MB);
14585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14586 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14588 /* A few boards don't want Ethernet@WireSpeed phy feature */
14589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14590 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14591 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14592 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14593 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14594 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14595 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14597 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14598 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14599 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14600 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14601 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14603 if (tg3_flag(tp, 5705_PLUS) &&
14604 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14605 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14606 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14607 !tg3_flag(tp, 57765_PLUS)) {
14608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14612 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14613 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14614 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14615 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14616 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14618 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14622 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14623 tp->phy_otp = tg3_read_otp_phycfg(tp);
14624 if (tp->phy_otp == 0)
14625 tp->phy_otp = TG3_OTP_DEFAULT;
14628 if (tg3_flag(tp, CPMU_PRESENT))
14629 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14631 tp->mi_mode = MAC_MI_MODE_BASE;
14633 tp->coalesce_mode = 0;
14634 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14635 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14636 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14638 /* Set these bits to enable statistics workaround. */
14639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14640 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14641 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14642 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14643 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14648 tg3_flag_set(tp, USE_PHYLIB);
14650 err = tg3_mdio_init(tp);
14654 /* Initialize data/descriptor byte/word swapping. */
14655 val = tr32(GRC_MODE);
14656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14657 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14658 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14659 GRC_MODE_B2HRX_ENABLE |
14660 GRC_MODE_HTX2B_ENABLE |
14661 GRC_MODE_HOST_STACKUP);
14663 val &= GRC_MODE_HOST_STACKUP;
14665 tw32(GRC_MODE, val | tp->grc_mode);
14667 tg3_switch_clocks(tp);
14669 /* Clear this out for sanity. */
14670 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14672 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14674 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14675 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14676 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14678 if (chiprevid == CHIPREV_ID_5701_A0 ||
14679 chiprevid == CHIPREV_ID_5701_B0 ||
14680 chiprevid == CHIPREV_ID_5701_B2 ||
14681 chiprevid == CHIPREV_ID_5701_B5) {
14682 void __iomem *sram_base;
14684 /* Write some dummy words into the SRAM status block
14685 * area and see if it reads back correctly. If the return
14686 * value is bad, force enable the PCIX workaround.
14688 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14690 writel(0x00000000, sram_base);
14691 writel(0x00000000, sram_base + 4);
14692 writel(0xffffffff, sram_base + 4);
14693 if (readl(sram_base) != 0x00000000)
14694 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14699 tg3_nvram_init(tp);
14701 grc_misc_cfg = tr32(GRC_MISC_CFG);
14702 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14705 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14706 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14707 tg3_flag_set(tp, IS_5788);
14709 if (!tg3_flag(tp, IS_5788) &&
14710 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14711 tg3_flag_set(tp, TAGGED_STATUS);
14712 if (tg3_flag(tp, TAGGED_STATUS)) {
14713 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14714 HOSTCC_MODE_CLRTICK_TXBD);
14716 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14717 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14718 tp->misc_host_ctrl);
14721 /* Preserve the APE MAC_MODE bits */
14722 if (tg3_flag(tp, ENABLE_APE))
14723 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14727 /* these are limited to 10/100 only */
14728 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14729 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14730 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14731 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14732 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14733 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14734 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14735 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14736 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14737 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14738 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14739 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14740 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14741 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14742 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14743 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14745 err = tg3_phy_probe(tp);
14747 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14748 /* ... but do not return immediately ... */
14753 tg3_read_fw_ver(tp);
14755 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14756 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14759 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14761 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14764 /* 5700 {AX,BX} chips have a broken status block link
14765 * change bit implementation, so we must use the
14766 * status register in those cases.
14768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14769 tg3_flag_set(tp, USE_LINKCHG_REG);
14771 tg3_flag_clear(tp, USE_LINKCHG_REG);
14773 /* The led_ctrl is set during tg3_phy_probe; here we might
14774 * have to force the link status polling mechanism based
14775 * upon subsystem IDs.
14777 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14779 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14780 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14781 tg3_flag_set(tp, USE_LINKCHG_REG);
14784 /* For all SERDES we poll the MAC status register. */
14785 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14786 tg3_flag_set(tp, POLL_SERDES);
14788 tg3_flag_clear(tp, POLL_SERDES);
14790 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14791 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14793 tg3_flag(tp, PCIX_MODE)) {
14794 tp->rx_offset = NET_SKB_PAD;
14795 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14796 tp->rx_copy_thresh = ~(u16)0;
14800 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14801 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14802 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14804 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14806 /* Increment the rx prod index on the rx std ring by at most
14807 * 8 for these chips to work around hw errata.
14809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14812 tp->rx_std_max_post = 8;
14814 if (tg3_flag(tp, ASPM_WORKAROUND))
14815 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14816 PCIE_PWR_MGMT_L1_THRESH_MSK;
14821 #ifdef CONFIG_SPARC
14822 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14824 struct net_device *dev = tp->dev;
14825 struct pci_dev *pdev = tp->pdev;
14826 struct device_node *dp = pci_device_to_OF_node(pdev);
14827 const unsigned char *addr;
14830 addr = of_get_property(dp, "local-mac-address", &len);
14831 if (addr && len == 6) {
14832 memcpy(dev->dev_addr, addr, 6);
14833 memcpy(dev->perm_addr, dev->dev_addr, 6);
14839 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14841 struct net_device *dev = tp->dev;
14843 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14844 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
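/* Look up the permanent MAC address: the firmware mailbox in SRAM first, then NVRAM, and finally the MAC address registers themselves. */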
14849 static int __devinit tg3_get_device_address(struct tg3 *tp)
14851 struct net_device *dev = tp->dev;
14852 u32 hi, lo, mac_offset;
14855 #ifdef CONFIG_SPARC
14856 if (!tg3_get_macaddr_sparc(tp))
14861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14862 tg3_flag(tp, 5780_CLASS)) {
14863 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14865 if (tg3_nvram_lock(tp))
14866 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14868 tg3_nvram_unlock(tp);
14869 } else if (tg3_flag(tp, 5717_PLUS)) {
14870 if (tp->pci_fn & 1)
14872 if (tp->pci_fn > 1)
14873 mac_offset += 0x18c;
14874 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14877 /* First try to get it from MAC address mailbox. */
14878 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14879 if ((hi >> 16) == 0x484b) {
14880 dev->dev_addr[0] = (hi >> 8) & 0xff;
14881 dev->dev_addr[1] = (hi >> 0) & 0xff;
14883 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14884 dev->dev_addr[2] = (lo >> 24) & 0xff;
14885 dev->dev_addr[3] = (lo >> 16) & 0xff;
14886 dev->dev_addr[4] = (lo >> 8) & 0xff;
14887 dev->dev_addr[5] = (lo >> 0) & 0xff;
14889 /* Some old bootcode may report a 0 MAC address in SRAM */
14890 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14893 /* Next, try NVRAM. */
14894 if (!tg3_flag(tp, NO_NVRAM) &&
14895 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14896 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14897 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14898 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14900 /* Finally just fetch it out of the MAC control regs. */
14902 hi = tr32(MAC_ADDR_0_HIGH);
14903 lo = tr32(MAC_ADDR_0_LOW);
14905 dev->dev_addr[5] = lo & 0xff;
14906 dev->dev_addr[4] = (lo >> 8) & 0xff;
14907 dev->dev_addr[3] = (lo >> 16) & 0xff;
14908 dev->dev_addr[2] = (lo >> 24) & 0xff;
14909 dev->dev_addr[1] = hi & 0xff;
14910 dev->dev_addr[0] = (hi >> 8) & 0xff;
14914 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14915 #ifdef CONFIG_SPARC
14916 if (!tg3_get_default_macaddr_sparc(tp))
14921 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14925 #define BOUNDARY_SINGLE_CACHELINE 1
14926 #define BOUNDARY_MULTI_CACHELINE 2
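/* Choose DMA read/write boundary settings for DMA_RW_CTRL based on the bus type (PCI, PCI-X, PCIe) and the PCI cache line size. */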
14928 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14930 int cacheline_size;
14934 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14936 cacheline_size = 1024;
14938 cacheline_size = (int) byte * 4;
14940 /* On 5703 and later chips, the boundary bits have no effect. */
14943 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14945 !tg3_flag(tp, PCI_EXPRESS))
14948 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14949 goal = BOUNDARY_MULTI_CACHELINE;
14951 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14952 goal = BOUNDARY_SINGLE_CACHELINE;
14958 if (tg3_flag(tp, 57765_PLUS)) {
14959 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14966 /* PCI controllers on most RISC systems tend to disconnect
14967 * when a device tries to burst across a cache-line boundary.
14968 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14970 * Unfortunately, for PCI-E there are only limited
14971 * write-side controls for this, and thus for reads
14972 * we will still get the disconnects. We'll also waste
14973 * these PCI cycles for both read and write for chips
14974 * other than 5700 and 5701 which do not implement the boundary bits.
14977 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14978 switch (cacheline_size) {
14983 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14984 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14985 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14987 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14988 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14993 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14994 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14998 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14999 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15002 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15003 switch (cacheline_size) {
15007 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15008 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15009 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15015 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15016 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15020 switch (cacheline_size) {
15022 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15023 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15024 DMA_RWCTRL_WRITE_BNDRY_16);
15029 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15030 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15031 DMA_RWCTRL_WRITE_BNDRY_32);
15036 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15037 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15038 DMA_RWCTRL_WRITE_BNDRY_64);
15043 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15044 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15045 DMA_RWCTRL_WRITE_BNDRY_128);
15050 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15051 DMA_RWCTRL_WRITE_BNDRY_256);
15054 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15055 DMA_RWCTRL_WRITE_BNDRY_512);
15059 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15060 DMA_RWCTRL_WRITE_BNDRY_1024);
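/* Push one buffer through the NIC's internal DMA engines using a hand-built internal descriptor; to_device selects the direction. */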
15069 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15071 struct tg3_internal_buffer_desc test_desc;
15072 u32 sram_dma_descs;
15075 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15077 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15078 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15079 tw32(RDMAC_STATUS, 0);
15080 tw32(WDMAC_STATUS, 0);
15082 tw32(BUFMGR_MODE, 0);
15083 tw32(FTQ_RESET, 0);
15085 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15086 test_desc.addr_lo = buf_dma & 0xffffffff;
15087 test_desc.nic_mbuf = 0x00002100;
15088 test_desc.len = size;
15091 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15092 * the *second* time the tg3 driver was getting loaded after an
15095 * Broadcom tells me:
15096 * ...the DMA engine is connected to the GRC block and a DMA
15097 * reset may affect the GRC block in some unpredictable way...
15098 * The behavior of resets to individual blocks has not been tested.
15100 * Broadcom noted the GRC reset will also reset all sub-components.
15103 test_desc.cqid_sqid = (13 << 8) | 2;
15105 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15108 test_desc.cqid_sqid = (16 << 8) | 7;
15110 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15113 test_desc.flags = 0x00000005;
15115 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15118 val = *(((u32 *)&test_desc) + i);
15119 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15120 sram_dma_descs + (i * sizeof(u32)));
15121 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15123 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15126 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15128 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15131 for (i = 0; i < 40; i++) {
15135 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15137 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15138 if ((val & 0xffff) == sram_dma_descs) {
15149 #define TEST_BUFFER_SIZE 0x2000
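/* Host bridges that still need the conservative 16-byte DMA write boundary even when the DMA test below passes. */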
15151 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15152 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15156 static int __devinit tg3_test_dma(struct tg3 *tp)
15158 dma_addr_t buf_dma;
15159 u32 *buf, saved_dma_rwctrl;
15162 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15163 &buf_dma, GFP_KERNEL);
15169 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15170 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15172 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15174 if (tg3_flag(tp, 57765_PLUS))
15177 if (tg3_flag(tp, PCI_EXPRESS)) {
15178 /* DMA read watermark not used on PCIE */
15179 tp->dma_rwctrl |= 0x00180000;
15180 } else if (!tg3_flag(tp, PCIX_MODE)) {
15181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15183 tp->dma_rwctrl |= 0x003f0000;
15185 tp->dma_rwctrl |= 0x003f000f;
15187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15189 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15190 u32 read_water = 0x7;
15192 /* If the 5704 is behind the EPB bridge, we can
15193 * do the less restrictive ONE_DMA workaround for
15194 * better performance.
15196 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15198 tp->dma_rwctrl |= 0x8000;
15199 else if (ccval == 0x6 || ccval == 0x7)
15200 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15204 /* Set bit 23 to enable PCIX hw bug fix */
15206 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15207 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15209 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15210 /* 5780 always in PCIX mode */
15211 tp->dma_rwctrl |= 0x00144000;
15212 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15213 /* 5714 always in PCIX mode */
15214 tp->dma_rwctrl |= 0x00148000;
15216 tp->dma_rwctrl |= 0x001b000f;
15220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15222 tp->dma_rwctrl &= 0xfffffff0;
15224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15226 /* Remove this if it causes problems for some boards. */
15227 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15229 /* On 5700/5701 chips, we need to set this bit.
15230 * Otherwise the chip will issue cacheline transactions
15231 * to streamable DMA memory with not all the byte
15232 * enables turned on. This is an error on several
15233 * RISC PCI controllers, in particular sparc64.
15235 * On 5703/5704 chips, this bit has been reassigned
15236 * a different meaning. In particular, it is used
15237 * on those chips to enable a PCI-X workaround.
15239 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15242 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15245 /* Unneeded, already done by tg3_get_invariants. */
15246 tg3_switch_clocks(tp);
15249 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15250 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15253 /* It is best to perform the DMA test with the maximum write burst size
15254 * to expose the 5700/5701 write DMA bug.
15256 saved_dma_rwctrl = tp->dma_rwctrl;
15257 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15258 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15263 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15266 /* Send the buffer to the chip. */
15267 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15269 dev_err(&tp->pdev->dev,
15270 "%s: Buffer write failed. err = %d\n",
15276 /* validate data reached card RAM correctly. */
15277 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15279 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15280 if (le32_to_cpu(val) != p[i]) {
15281 dev_err(&tp->pdev->dev,
15282 "%s: Buffer corrupted on device! "
15283 "(%d != %d)\n", __func__, val, i);
15284 /* ret = -ENODEV here? */
15289 /* Now read it back. */
15290 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15292 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15293 "err = %d\n", __func__, ret);
15298 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15302 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15303 DMA_RWCTRL_WRITE_BNDRY_16) {
15304 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15305 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15306 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15309 dev_err(&tp->pdev->dev,
15310 "%s: Buffer corrupted on read back! "
15311 "(%d != %d)\n", __func__, p[i], i);
15317 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15323 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15324 DMA_RWCTRL_WRITE_BNDRY_16) {
15325 /* The DMA test passed without adjusting the DMA boundary;
15326 * now look for chipsets that are known to expose the
15327 * DMA bug without failing the test.
15329 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15330 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15331 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15333 /* Safe to use the calculated DMA boundary. */
15334 tp->dma_rwctrl = saved_dma_rwctrl;
15337 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15341 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
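/* Buffer manager watermark defaults vary by chip generation (57765+, 5705+, legacy 5700/5701) and between standard and jumbo rings. */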
15346 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15348 if (tg3_flag(tp, 57765_PLUS)) {
15349 tp->bufmgr_config.mbuf_read_dma_low_water =
15350 DEFAULT_MB_RDMA_LOW_WATER_5705;
15351 tp->bufmgr_config.mbuf_mac_rx_low_water =
15352 DEFAULT_MB_MACRX_LOW_WATER_57765;
15353 tp->bufmgr_config.mbuf_high_water =
15354 DEFAULT_MB_HIGH_WATER_57765;
15356 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15357 DEFAULT_MB_RDMA_LOW_WATER_5705;
15358 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15359 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15360 tp->bufmgr_config.mbuf_high_water_jumbo =
15361 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15362 } else if (tg3_flag(tp, 5705_PLUS)) {
15363 tp->bufmgr_config.mbuf_read_dma_low_water =
15364 DEFAULT_MB_RDMA_LOW_WATER_5705;
15365 tp->bufmgr_config.mbuf_mac_rx_low_water =
15366 DEFAULT_MB_MACRX_LOW_WATER_5705;
15367 tp->bufmgr_config.mbuf_high_water =
15368 DEFAULT_MB_HIGH_WATER_5705;
15369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15370 tp->bufmgr_config.mbuf_mac_rx_low_water =
15371 DEFAULT_MB_MACRX_LOW_WATER_5906;
15372 tp->bufmgr_config.mbuf_high_water =
15373 DEFAULT_MB_HIGH_WATER_5906;
15376 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15377 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15378 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15379 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15380 tp->bufmgr_config.mbuf_high_water_jumbo =
15381 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15383 tp->bufmgr_config.mbuf_read_dma_low_water =
15384 DEFAULT_MB_RDMA_LOW_WATER;
15385 tp->bufmgr_config.mbuf_mac_rx_low_water =
15386 DEFAULT_MB_MACRX_LOW_WATER;
15387 tp->bufmgr_config.mbuf_high_water =
15388 DEFAULT_MB_HIGH_WATER;
15390 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15391 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15392 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15393 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15394 tp->bufmgr_config.mbuf_high_water_jumbo =
15395 DEFAULT_MB_HIGH_WATER_JUMBO;
15398 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15399 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15402 static char * __devinit tg3_phy_string(struct tg3 *tp)
15404 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15405 case TG3_PHY_ID_BCM5400: return "5400";
15406 case TG3_PHY_ID_BCM5401: return "5401";
15407 case TG3_PHY_ID_BCM5411: return "5411";
15408 case TG3_PHY_ID_BCM5701: return "5701";
15409 case TG3_PHY_ID_BCM5703: return "5703";
15410 case TG3_PHY_ID_BCM5704: return "5704";
15411 case TG3_PHY_ID_BCM5705: return "5705";
15412 case TG3_PHY_ID_BCM5750: return "5750";
15413 case TG3_PHY_ID_BCM5752: return "5752";
15414 case TG3_PHY_ID_BCM5714: return "5714";
15415 case TG3_PHY_ID_BCM5780: return "5780";
15416 case TG3_PHY_ID_BCM5755: return "5755";
15417 case TG3_PHY_ID_BCM5787: return "5787";
15418 case TG3_PHY_ID_BCM5784: return "5784";
15419 case TG3_PHY_ID_BCM5756: return "5722/5756";
15420 case TG3_PHY_ID_BCM5906: return "5906";
15421 case TG3_PHY_ID_BCM5761: return "5761";
15422 case TG3_PHY_ID_BCM5718C: return "5718C";
15423 case TG3_PHY_ID_BCM5718S: return "5718S";
15424 case TG3_PHY_ID_BCM57765: return "57765";
15425 case TG3_PHY_ID_BCM5719C: return "5719C";
15426 case TG3_PHY_ID_BCM5720C: return "5720C";
15427 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15428 case 0: return "serdes";
15429 default: return "unknown";
15433 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15435 if (tg3_flag(tp, PCI_EXPRESS)) {
15436 strcpy(str, "PCI Express");
15438 } else if (tg3_flag(tp, PCIX_MODE)) {
15439 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15441 strcpy(str, "PCIX:");
15443 if ((clock_ctrl == 7) ||
15444 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15445 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15446 strcat(str, "133MHz");
15447 else if (clock_ctrl == 0)
15448 strcat(str, "33MHz");
15449 else if (clock_ctrl == 2)
15450 strcat(str, "50MHz");
15451 else if (clock_ctrl == 4)
15452 strcat(str, "66MHz");
15453 else if (clock_ctrl == 6)
15454 strcat(str, "100MHz");
15456 strcpy(str, "PCI:");
15457 if (tg3_flag(tp, PCI_HIGH_SPEED))
15458 strcat(str, "66MHz");
15460 strcat(str, "33MHz");
15462 if (tg3_flag(tp, PCI_32BIT))
15463 strcat(str, ":32-bit");
15465 strcat(str, ":64-bit");
15469 static void __devinit tg3_init_coal(struct tg3 *tp)
15471 struct ethtool_coalesce *ec = &tp->coal;
15473 memset(ec, 0, sizeof(*ec));
15474 ec->cmd = ETHTOOL_GCOALESCE;
15475 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15476 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15477 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15478 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15479 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15480 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15481 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15482 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15483 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15485 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15486 HOSTCC_MODE_CLRTICK_TXBD)) {
15487 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15488 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15489 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15490 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15493 if (tg3_flag(tp, 5705_PLUS)) {
15494 ec->rx_coalesce_usecs_irq = 0;
15495 ec->tx_coalesce_usecs_irq = 0;
15496 ec->stats_block_coalesce_usecs = 0;
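/* PCI probe entry point: enable and map the device, discover chip invariants, set up DMA masks and offload features, and test the DMA engine. */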
15500 static int __devinit tg3_init_one(struct pci_dev *pdev,
15501 const struct pci_device_id *ent)
15503 struct net_device *dev;
15505 int i, err, pm_cap;
15506 u32 sndmbx, rcvmbx, intmbx;
15508 u64 dma_mask, persist_dma_mask;
15509 netdev_features_t features = 0;
15511 printk_once(KERN_INFO "%s\n", version);
15513 err = pci_enable_device(pdev);
15515 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15519 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15521 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15522 goto err_out_disable_pdev;
15525 pci_set_master(pdev);
15527 /* Find power-management capability. */
15528 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15530 dev_err(&pdev->dev,
15531 "Cannot find Power Management capability, aborting\n");
15533 goto err_out_free_res;
15536 err = pci_set_power_state(pdev, PCI_D0);
15538 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15539 goto err_out_free_res;
15542 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15545 goto err_out_power_down;
15548 SET_NETDEV_DEV(dev, &pdev->dev);
15550 tp = netdev_priv(dev);
15553 tp->pm_cap = pm_cap;
15554 tp->rx_mode = TG3_DEF_RX_MODE;
15555 tp->tx_mode = TG3_DEF_TX_MODE;
15558 tp->msg_enable = tg3_debug;
15560 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15562 /* The word/byte swap controls here control register access byte
15563 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15566 tp->misc_host_ctrl =
15567 MISC_HOST_CTRL_MASK_PCI_INT |
15568 MISC_HOST_CTRL_WORD_SWAP |
15569 MISC_HOST_CTRL_INDIR_ACCESS |
15570 MISC_HOST_CTRL_PCISTATE_RW;
15572 /* The NONFRM (non-frame) byte/word swap controls take effect
15573 * on descriptor entries, anything which isn't packet data.
15575 * The StrongARM chips on the board (one for tx, one for rx)
15576 * are running in big-endian mode.
15578 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15579 GRC_MODE_WSWAP_NONFRM_DATA);
15580 #ifdef __BIG_ENDIAN
15581 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15583 spin_lock_init(&tp->lock);
15584 spin_lock_init(&tp->indirect_lock);
15585 INIT_WORK(&tp->reset_task, tg3_reset_task);
15587 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15589 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15591 goto err_out_free_dev;
15594 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15595 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15596 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15597 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15598 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15599 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15600 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15601 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15602 tg3_flag_set(tp, ENABLE_APE);
15603 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15604 if (!tp->aperegs) {
15605 dev_err(&pdev->dev,
15606 "Cannot map APE registers, aborting\n");
15608 goto err_out_iounmap;
15612 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15613 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15615 dev->ethtool_ops = &tg3_ethtool_ops;
15616 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15617 dev->netdev_ops = &tg3_netdev_ops;
15618 dev->irq = pdev->irq;
15620 err = tg3_get_invariants(tp);
15622 dev_err(&pdev->dev,
15623 "Problem fetching invariants of chip, aborting\n");
15624 goto err_out_apeunmap;
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

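	/* At this point either a >32-bit streaming DMA mask is in place with
	 * the coherent mask set to persist_dma_mask, or both masks have
	 * fallen back to 32 bits because the wider mask was rejected.
	 */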

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

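	/* Everything accumulated in 'features' is also advertised in
	 * vlan_features, so VLAN devices stacked on this interface can use
	 * the same checksum and TSO offloads.
	 */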

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

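	/* MAX_RXPEND_64 caps the standard receive ring at 64 pending buffers;
	 * the rx_pending default set above is lowered to 63 to match,
	 * presumably to work around a limitation of 5705 A1 parts on slower
	 * buses when TSO is unavailable.
	 */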

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it down
	 * cleanly.  Otherwise the DMA self test will enable WDMAC and we'll
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set up
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

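	/* MAILBOX_INTERRUPT_0, MAILBOX_RCVRET_CON_IDX_0 and
	 * MAILBOX_SNDHOST_PROD_IDX_0 are 64-bit mailbox registers; only their
	 * low 32-bit halves are written, which is what the TG3_64BIT_REG_LOW
	 * offset selects above.
	 */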

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);
		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);
	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
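
/* SIMPLE_DEV_PM_OPS builds a struct dev_pm_ops that points the system sleep
 * callbacks (suspend/resume, freeze/thaw, poweroff/restore) at the two
 * handlers above, so hibernation follows the same paths as suspend.
 */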

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);
	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

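/* The PCI error recovery core invokes these callbacks in order:
 * .error_detected to quiesce the device, .slot_reset after the slot or link
 * has been reset, and .resume once normal traffic may restart.
 */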

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);