net: introduce and use netdev_features_t for device features sets
linux-2.6-block.git: drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

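/* Example (illustrative): tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap; tg3_flag_set() and
 * tg3_flag_clear() are the matching set_bit()/clear_bit() wrappers.
 */
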
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     121
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "November 2, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
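
/* Example: TG3_TX_RING_SIZE is 512, a power of two, so
 * (N + 1) % TG3_TX_RING_SIZE == (N + 1) & (TG3_TX_RING_SIZE - 1),
 * and NEXT_TX() compiles to a single AND instead of a divide.
 */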

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
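
/* Illustrative sketch only (not the actual rx code): with a constant
 * threshold, a receive-path check of the form
 *
 *      if (len < TG3_RX_COPY_THRESH(tp))
 *              copy the packet into a small new skb;
 *      else
 *              hand the DMA buffer up the stack directly;
 *
 * lets the compiler fold the threshold and drop the
 * tp->rx_copy_thresh dereference.
 */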

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX               4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
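
/* Example (illustrative): GPIO power switching elsewhere in this driver
 * uses
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * so the 100 usec settling time is honored on both the posted and
 * non-posted write paths.
 */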

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't have any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
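
/* Usage pattern (as in tg3_ape_send_event() below): acquire with
 * tg3_ape_lock(tp, TG3_APE_LOCK_MEM), touch the shared APE state,
 * then release with tg3_ape_unlock(tp, TG3_APE_LOCK_MEM).
 */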

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
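
/* 5000 polls at 10 usec each bounds a single PHY access at ~50 ms. */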

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
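
/* Example: a read of MII_BMSR composes MAC_MI_COM from the shifted PHY
 * and register addresses plus MI_COM_CMD_READ | MI_COM_START, then
 * polls MI_COM_BUSY; the result is taken from the MI_COM_DATA_MASK
 * bits of the register.
 */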

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
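
/* tg3_bmcr_reset() is also used below by tg3_mdio_init() to wake a
 * powered-down PHY before the mdio bus scan probes its ID registers.
 */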

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        int ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;
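        /* The poll loop below waits 8 usec per iteration, so the
         * delay_cnt computed above preserves the total wait time.
         */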

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
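        /* 14 bytes = three 4-byte register pairs (BMCR/BMSR,
         * ADVERTISE/LPA, CTRL1000/STAT1000) plus the 2-byte PHYADDR
         * in the upper half of the last word.
         */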
1469
1470         val = 0;
1471         if (!tg3_readphy(tp, MII_BMCR, &reg))
1472                 val = reg << 16;
1473         if (!tg3_readphy(tp, MII_BMSR, &reg))
1474                 val |= (reg & 0xffff);
1475         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476
1477         val = 0;
1478         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479                 val = reg << 16;
1480         if (!tg3_readphy(tp, MII_LPA, &reg))
1481                 val |= (reg & 0xffff);
1482         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483
1484         val = 0;
1485         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487                         val = reg << 16;
1488                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489                         val |= (reg & 0xffff);
1490         }
1491         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492
1493         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494                 val = reg << 16;
1495         else
1496                 val = 0;
1497         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498
1499         tg3_generate_fw_event(tp);
1500 }
1501
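/* tg3_ump_link_report() above is one instance of the generic
 * driver-to-firmware mailbox sequence: wait for the previous event to
 * be acked, write a command word, a byte length, and the payload into
 * NIC SRAM, then raise the event.  A minimal sketch for a hypothetical
 * one-word command (cmd and data are illustrative values; tp->lock
 * must be held, as for the helpers above):
 */
static void __maybe_unused
tg3_example_fw_cmd(struct tg3 *tp, u32 cmd, u32 data)
{
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, data);

	tg3_generate_fw_event(tp);
}
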
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX cpu to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX cpu to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
1616         /* Wait for firmware initialization to complete. */
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more time to
1637                  * finish initializing before we proceed.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
1644
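/* The mailbox value polled above is the other half of the handshake
 * begun by tg3_write_sig_pre_reset(): the driver writes
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset, and the bootcode
 * writes back the one's complement of that magic once its
 * initialization completes.
 */
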
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_PAUSE_CAP;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_PAUSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & ADVERTISE_1000XPAUSE) {
1710                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1711                         if (rmtadv & LPA_1000XPAUSE)
1712                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1713                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1714                                 cap = FLOW_CTRL_RX;
1715                 } else {
1716                         if (rmtadv & LPA_1000XPAUSE)
1717                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1718                 }
1719         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1720                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1721                         cap = FLOW_CTRL_TX;
1722         }
1723
1724         return cap;
1725 }
1726
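/* The resolution above follows IEEE 802.3 Annex 28B.3.  In table
 * form, with PAUSE/ASYM denoting the 1000BASE-X pause bits on each
 * side:
 *
 *   local PAUSE  local ASYM  partner PAUSE  partner ASYM  resolved
 *        1            -           1              -        TX | RX
 *        1            1           0              1        RX
 *        0            1           1              1        TX
 *     (any other combination)                             none
 */
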
1727 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1728 {
1729         u8 autoneg;
1730         u8 flowctrl = 0;
1731         u32 old_rx_mode = tp->rx_mode;
1732         u32 old_tx_mode = tp->tx_mode;
1733
1734         if (tg3_flag(tp, USE_PHYLIB))
1735                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1736         else
1737                 autoneg = tp->link_config.autoneg;
1738
1739         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1740                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1741                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1742                 else
1743                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1744         } else
1745                 flowctrl = tp->link_config.flowctrl;
1746
1747         tp->link_config.active_flowctrl = flowctrl;
1748
1749         if (flowctrl & FLOW_CTRL_RX)
1750                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1751         else
1752                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1753
1754         if (old_rx_mode != tp->rx_mode)
1755                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1756
1757         if (flowctrl & FLOW_CTRL_TX)
1758                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1759         else
1760                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1761
1762         if (old_tx_mode != tp->tx_mode)
1763                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1764 }
1765
1766 static void tg3_adjust_link(struct net_device *dev)
1767 {
1768         u8 oldflowctrl, linkmesg = 0;
1769         u32 mac_mode, lcl_adv, rmt_adv;
1770         struct tg3 *tp = netdev_priv(dev);
1771         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1772
1773         spin_lock_bh(&tp->lock);
1774
1775         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1776                                     MAC_MODE_HALF_DUPLEX);
1777
1778         oldflowctrl = tp->link_config.active_flowctrl;
1779
1780         if (phydev->link) {
1781                 lcl_adv = 0;
1782                 rmt_adv = 0;
1783
1784                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1785                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1786                 else if (phydev->speed == SPEED_1000 ||
1787                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1788                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1789                 else
1790                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1791
1792                 if (phydev->duplex == DUPLEX_HALF)
1793                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1794                 else {
1795                         lcl_adv = tg3_advert_flowctrl_1000T(
1796                                   tp->link_config.flowctrl);
1797
1798                         if (phydev->pause)
1799                                 rmt_adv = LPA_PAUSE_CAP;
1800                         if (phydev->asym_pause)
1801                                 rmt_adv |= LPA_PAUSE_ASYM;
1802                 }
1803
1804                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1805         } else
1806                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1807
1808         if (mac_mode != tp->mac_mode) {
1809                 tp->mac_mode = mac_mode;
1810                 tw32_f(MAC_MODE, tp->mac_mode);
1811                 udelay(40);
1812         }
1813
1814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1815                 if (phydev->speed == SPEED_10)
1816                         tw32(MAC_MI_STAT,
1817                              MAC_MI_STAT_10MBPS_MODE |
1818                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1819                 else
1820                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1821         }
1822
1823         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1824                 tw32(MAC_TX_LENGTHS,
1825                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1826                       (6 << TX_LENGTHS_IPG_SHIFT) |
1827                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1828         else
1829                 tw32(MAC_TX_LENGTHS,
1830                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1831                       (6 << TX_LENGTHS_IPG_SHIFT) |
1832                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1833
1834         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1835             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1836             phydev->speed != tp->link_config.active_speed ||
1837             phydev->duplex != tp->link_config.active_duplex ||
1838             oldflowctrl != tp->link_config.active_flowctrl)
1839                 linkmesg = 1;
1840
1841         tp->link_config.active_speed = phydev->speed;
1842         tp->link_config.active_duplex = phydev->duplex;
1843
1844         spin_unlock_bh(&tp->lock);
1845
1846         if (linkmesg)
1847                 tg3_link_report(tp);
1848 }
1849
1850 static int tg3_phy_init(struct tg3 *tp)
1851 {
1852         struct phy_device *phydev;
1853
1854         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1855                 return 0;
1856
1857         /* Bring the PHY back to a known state. */
1858         tg3_bmcr_reset(tp);
1859
1860         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1861
1862         /* Attach the MAC to the PHY. */
1863         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1864                              phydev->dev_flags, phydev->interface);
1865         if (IS_ERR(phydev)) {
1866                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1867                 return PTR_ERR(phydev);
1868         }
1869
1870         /* Mask with MAC supported features. */
1871         switch (phydev->interface) {
1872         case PHY_INTERFACE_MODE_GMII:
1873         case PHY_INTERFACE_MODE_RGMII:
1874                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1875                         phydev->supported &= (PHY_GBIT_FEATURES |
1876                                               SUPPORTED_Pause |
1877                                               SUPPORTED_Asym_Pause);
1878                         break;
1879                 }
1880                 /* fallthru */
1881         case PHY_INTERFACE_MODE_MII:
1882                 phydev->supported &= (PHY_BASIC_FEATURES |
1883                                       SUPPORTED_Pause |
1884                                       SUPPORTED_Asym_Pause);
1885                 break;
1886         default:
1887                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1888                 return -EINVAL;
1889         }
1890
1891         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1892
1893         phydev->advertising = phydev->supported;
1894
1895         return 0;
1896 }
1897
1898 static void tg3_phy_start(struct tg3 *tp)
1899 {
1900         struct phy_device *phydev;
1901
1902         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1903                 return;
1904
1905         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1906
1907         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1908                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1909                 phydev->speed = tp->link_config.orig_speed;
1910                 phydev->duplex = tp->link_config.orig_duplex;
1911                 phydev->autoneg = tp->link_config.orig_autoneg;
1912                 phydev->advertising = tp->link_config.orig_advertising;
1913         }
1914
1915         phy_start(phydev);
1916
1917         phy_start_aneg(phydev);
1918 }
1919
1920 static void tg3_phy_stop(struct tg3 *tp)
1921 {
1922         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1923                 return;
1924
1925         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 }
1927
1928 static void tg3_phy_fini(struct tg3 *tp)
1929 {
1930         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1931                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1932                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1933         }
1934 }
1935
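/* A sketch of the intended call order for the phylib helpers above
 * (illustrative only; locking and most error handling elided):
 */
static int __maybe_unused tg3_example_phy_lifecycle(struct tg3 *tp)
{
	int err;

	err = tg3_phy_init(tp);		/* connect the PHY once */
	if (err)
		return err;

	tg3_phy_start(tp);		/* (re)start autonegotiation */
	/* ... device is up and running ... */
	tg3_phy_stop(tp);		/* quiesce before reset/suspend */

	tg3_phy_fini(tp);		/* disconnect at teardown */
	return 0;
}
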
1936 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1937 {
1938         int err;
1939         u32 val;
1940
1941         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1942                 return 0;
1943
1944         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1945                 /* Cannot do read-modify-write on 5401 */
1946                 err = tg3_phy_auxctl_write(tp,
1947                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1948                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1949                                            0x4c20);
1950                 goto done;
1951         }
1952
1953         err = tg3_phy_auxctl_read(tp,
1954                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1955         if (err)
1956                 return err;
1957
1958         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1959         err = tg3_phy_auxctl_write(tp,
1960                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1961
1962 done:
1963         return err;
1964 }
1965
1966 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1967 {
1968         u32 phytest;
1969
1970         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1971                 u32 phy;
1972
1973                 tg3_writephy(tp, MII_TG3_FET_TEST,
1974                              phytest | MII_TG3_FET_SHADOW_EN);
1975                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1976                         if (enable)
1977                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978                         else
1979                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1980                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1981                 }
1982                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1983         }
1984 }
1985
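/* tg3_phy_fet_toggle_apd() above uses the FET shadow-register idiom
 * that recurs throughout this file: enable shadow access through
 * MII_TG3_FET_TEST, read-modify-write the shadow register, then
 * restore the TEST register.  A generic sketch of the idiom (the
 * clr/set interface is hypothetical, not part of the driver):
 */
static void __maybe_unused
tg3_example_fet_shdw_rmw(struct tg3 *tp, u32 reg, u32 clr, u32 set)
{
	u32 phytest, val;

	if (tg3_readphy(tp, MII_TG3_FET_TEST, &phytest))
		return;

	tg3_writephy(tp, MII_TG3_FET_TEST, phytest | MII_TG3_FET_SHADOW_EN);
	if (!tg3_readphy(tp, reg, &val))
		tg3_writephy(tp, reg, (val & ~clr) | set);
	tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
}
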
1986 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1987 {
1988         u32 reg;
1989
1990         if (!tg3_flag(tp, 5705_PLUS) ||
1991             (tg3_flag(tp, 5717_PLUS) &&
1992              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1993                 return;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1996                 tg3_phy_fet_toggle_apd(tp, enable);
1997                 return;
1998         }
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_SCR5_SEL |
2002               MII_TG3_MISC_SHDW_SCR5_LPED |
2003               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2004               MII_TG3_MISC_SHDW_SCR5_SDTL |
2005               MII_TG3_MISC_SHDW_SCR5_C125OE;
2006         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2007                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2008
2009         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2010
2012         reg = MII_TG3_MISC_SHDW_WREN |
2013               MII_TG3_MISC_SHDW_APD_SEL |
2014               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2015         if (enable)
2016                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2017
2018         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2019 }
2020
2021 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2022 {
2023         u32 phy;
2024
2025         if (!tg3_flag(tp, 5705_PLUS) ||
2026             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2027                 return;
2028
2029         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2030                 u32 ephy;
2031
2032                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2033                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2034
2035                         tg3_writephy(tp, MII_TG3_FET_TEST,
2036                                      ephy | MII_TG3_FET_SHADOW_EN);
2037                         if (!tg3_readphy(tp, reg, &phy)) {
2038                                 if (enable)
2039                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040                                 else
2041                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2042                                 tg3_writephy(tp, reg, phy);
2043                         }
2044                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2045                 }
2046         } else {
2047                 int ret;
2048
2049                 ret = tg3_phy_auxctl_read(tp,
2050                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2051                 if (!ret) {
2052                         if (enable)
2053                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054                         else
2055                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2056                         tg3_phy_auxctl_write(tp,
2057                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2058                 }
2059         }
2060 }
2061
2062 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2063 {
2064         int ret;
2065         u32 val;
2066
2067         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2068                 return;
2069
2070         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2071         if (!ret)
2072                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2073                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2074 }
2075
2076 static void tg3_phy_apply_otp(struct tg3 *tp)
2077 {
2078         u32 otp, phy;
2079
2080         if (!tp->phy_otp)
2081                 return;
2082
2083         otp = tp->phy_otp;
2084
2085         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2086                 return;
2087
2088         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2089         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2090         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2091
2092         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2093               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2094         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2095
2096         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2097         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2098         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2099
2100         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2101         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2102
2103         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2104         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2105
2106         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2107               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2108         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2109
2110         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 }
2112
2113 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2114 {
2115         u32 val;
2116
2117         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2118                 return;
2119
2120         tp->setlpicnt = 0;
2121
2122         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2123             current_link_up == 1 &&
2124             tp->link_config.active_duplex == DUPLEX_FULL &&
2125             (tp->link_config.active_speed == SPEED_100 ||
2126              tp->link_config.active_speed == SPEED_1000)) {
2127                 u32 eeectl;
2128
2129                 if (tp->link_config.active_speed == SPEED_1000)
2130                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2131                 else
2132                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2133
2134                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2135
2136                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2137                                   TG3_CL45_D7_EEERES_STAT, &val);
2138
2139                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2140                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2141                         tp->setlpicnt = 2;
2142         }
2143
2144         if (!tp->setlpicnt) {
2145                 if (current_link_up == 1 &&
2146                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2147                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2148                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149                 }
2150
2151                 val = tr32(TG3_CPMU_EEE_MODE);
2152                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2153         }
2154 }
2155
2156 static void tg3_phy_eee_enable(struct tg3 *tp)
2157 {
2158         u32 val;
2159
2160         if (tp->link_config.active_speed == SPEED_1000 &&
2161             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2162              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2163              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2164             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2165                 val = MII_TG3_DSP_TAP26_ALNOKO |
2166                       MII_TG3_DSP_TAP26_RMRXSTO;
2167                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2168                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169         }
2170
2171         val = tr32(TG3_CPMU_EEE_MODE);
2172         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 }
2174
2175 static int tg3_wait_macro_done(struct tg3 *tp)
2176 {
2177         int limit = 100;
2178
2179         while (limit--) {
2180                 u32 tmp32;
2181
2182                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2183                         if ((tmp32 & 0x1000) == 0)
2184                                 break;
2185                 }
2186         }
2187         if (limit < 0)
2188                 return -EBUSY;
2189
2190         return 0;
2191 }
2192
2193 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2194 {
2195         static const u32 test_pat[4][6] = {
2196         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2197         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2198         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2199         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2200         };
2201         int chan;
2202
2203         for (chan = 0; chan < 4; chan++) {
2204                 int i;
2205
2206                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2207                              (chan * 0x2000) | 0x0200);
2208                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2209
2210                 for (i = 0; i < 6; i++)
2211                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2212                                      test_pat[chan][i]);
2213
2214                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2215                 if (tg3_wait_macro_done(tp)) {
2216                         *resetp = 1;
2217                         return -EBUSY;
2218                 }
2219
2220                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2221                              (chan * 0x2000) | 0x0200);
2222                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2223                 if (tg3_wait_macro_done(tp)) {
2224                         *resetp = 1;
2225                         return -EBUSY;
2226                 }
2227
2228                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2229                 if (tg3_wait_macro_done(tp)) {
2230                         *resetp = 1;
2231                         return -EBUSY;
2232                 }
2233
2234                 for (i = 0; i < 6; i += 2) {
2235                         u32 low, high;
2236
2237                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2238                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2239                             tg3_wait_macro_done(tp)) {
2240                                 *resetp = 1;
2241                                 return -EBUSY;
2242                         }
2243                         low &= 0x7fff;
2244                         high &= 0x000f;
2245                         if (low != test_pat[chan][i] ||
2246                             high != test_pat[chan][i+1]) {
2247                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2248                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2249                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2250
2251                                 return -EBUSY;
2252                         }
2253                 }
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2260 {
2261         int chan;
2262
2263         for (chan = 0; chan < 4; chan++) {
2264                 int i;
2265
2266                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2267                              (chan * 0x2000) | 0x0200);
2268                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2269                 for (i = 0; i < 6; i++)
2270                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2271                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2272                 if (tg3_wait_macro_done(tp))
2273                         return -EBUSY;
2274         }
2275
2276         return 0;
2277 }
2278
2279 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2280 {
2281         u32 reg32, phy9_orig;
2282         int retries, do_phy_reset, err;
2283
2284         retries = 10;
2285         do_phy_reset = 1;
2286         do {
2287                 if (do_phy_reset) {
2288                         err = tg3_bmcr_reset(tp);
2289                         if (err)
2290                                 return err;
2291                         do_phy_reset = 0;
2292                 }
2293
2294                 /* Disable transmitter and interrupt.  */
2295                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2296                         continue;
2297
2298                 reg32 |= 0x3000;
2299                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2300
2301                 /* Set full-duplex, 1000 mbps.  */
2302                 tg3_writephy(tp, MII_BMCR,
2303                              BMCR_FULLDPLX | BMCR_SPEED1000);
2304
2305                 /* Set to master mode.  */
2306                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2307                         continue;
2308
2309                 tg3_writephy(tp, MII_CTRL1000,
2310                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2311
2312                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2313                 if (err)
2314                         return err;
2315
2316                 /* Block the PHY control access.  */
2317                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2318
2319                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2320                 if (!err)
2321                         break;
2322         } while (--retries);
2323
2324         err = tg3_phy_reset_chanpat(tp);
2325         if (err)
2326                 return err;
2327
2328         tg3_phydsp_write(tp, 0x8005, 0x0000);
2329
2330         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2331         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2332
2333         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2334
2335         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2336
2337         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2338                 reg32 &= ~0x3000;
2339                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340         } else if (!err)
2341                 err = -EBUSY;
2342
2343         return err;
2344 }
2345
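/* To summarize the workaround above: reset the PHY, park it at
 * 1000 Mbps full duplex in master mode, write a known DSP test
 * pattern to each of the four channels, and retry the whole sequence
 * (up to ten times) until the pattern reads back intact; finally
 * clear the channel patterns and restore the original PHY state.
 */
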
2346 /* Reset the tigon3 PHY and apply the chip-specific DSP and
2347  * auxctl workarounds that must follow a PHY reset.
2348  */
2349 static int tg3_phy_reset(struct tg3 *tp)
2350 {
2351         u32 val, cpmuctrl;
2352         int err;
2353
2354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2355                 val = tr32(GRC_MISC_CFG);
2356                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2357                 udelay(40);
2358         }
2359         err  = tg3_readphy(tp, MII_BMSR, &val);
2360         err |= tg3_readphy(tp, MII_BMSR, &val);
2361         if (err != 0)
2362                 return -EBUSY;
2363
2364         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2365                 netif_carrier_off(tp->dev);
2366                 tg3_link_report(tp);
2367         }
2368
2369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2371             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2372                 err = tg3_phy_reset_5703_4_5(tp);
2373                 if (err)
2374                         return err;
2375                 goto out;
2376         }
2377
2378         cpmuctrl = 0;
2379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2380             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2381                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2382                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2383                         tw32(TG3_CPMU_CTRL,
2384                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385         }
2386
2387         err = tg3_bmcr_reset(tp);
2388         if (err)
2389                 return err;
2390
2391         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2392                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2393                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2394
2395                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2396         }
2397
2398         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2399             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2400                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2401                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2402                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2403                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2404                         udelay(40);
2405                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2406                 }
2407         }
2408
2409         if (tg3_flag(tp, 5717_PLUS) &&
2410             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2411                 return 0;
2412
2413         tg3_phy_apply_otp(tp);
2414
2415         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2416                 tg3_phy_toggle_apd(tp, true);
2417         else
2418                 tg3_phy_toggle_apd(tp, false);
2419
2420 out:
2421         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2422             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2424                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2425                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426         }
2427
2428         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2429                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2430                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431         }
2432
2433         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2434                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2436                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2437                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2441                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2442                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2443                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2444                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2445                                 tg3_writephy(tp, MII_TG3_TEST1,
2446                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2447                         } else
2448                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2449
2450                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2451                 }
2452         }
2453
2454         /* Set the extended packet length bit (bit 14) on all
2455          * chips that support jumbo frames. */
2456         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2457                 /* Cannot do read-modify-write on 5401 */
2458                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2459         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 /* Set bit 14 with read-modify-write to preserve other bits */
2461                 err = tg3_phy_auxctl_read(tp,
2462                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2463                 if (!err)
2464                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2465                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466         }
2467
2468         /* Set PHY register 0x10 bit 0 to high FIFO elasticity
2469          * to support transmission of jumbo frames.
2470          */
2471         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2472                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2473                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2474                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475         }
2476
2477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478                 /* adjust output voltage */
2479                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480         }
2481
2482         tg3_phy_toggle_automdix(tp, 1);
2483         tg3_phy_set_wirespeed(tp);
2484         return 0;
2485 }
2486
2487 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2488 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2489 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2490                                           TG3_GPIO_MSG_NEED_VAUX)
2491 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2492         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2493          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2494          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2495          (TG3_GPIO_MSG_DRVR_PRES << 12))
2496
2497 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2498         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2499          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2500          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2501          (TG3_GPIO_MSG_NEED_VAUX << 12))
2502
2503 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2504 {
2505         u32 status, shift;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2509                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2510         else
2511                 status = tr32(TG3_CPMU_DRV_STATUS);
2512
2513         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2514         status &= ~(TG3_GPIO_MSG_MASK << shift);
2515         status |= (newstat << shift);
2516
2517         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2519                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2520         else
2521                 tw32(TG3_CPMU_DRV_STATUS, status);
2522
2523         return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 }
2525
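/* Worked example of the packing used above: the value returned by
 * tg3_set_function_status() carries one 4-bit TG3_GPIO_MSG_* slot per
 * PCI function, so one function's status can be recovered with:
 */
static inline u32 __maybe_unused
tg3_example_fn_status(u32 status, unsigned int pci_fn)
{
	/* status is already shifted down by TG3_APE_GPIO_MSG_SHIFT,
	 * as in the return value above.
	 */
	return (status >> (4 * pci_fn)) & TG3_GPIO_MSG_MASK;
}
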
2526 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2527 {
2528         if (!tg3_flag(tp, IS_NIC))
2529                 return 0;
2530
2531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2534                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2535                         return -EIO;
2536
2537                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2538
2539                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2540                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2541
2542                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2543         } else {
2544                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2545                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2546         }
2547
2548         return 0;
2549 }
2550
2551 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2552 {
2553         u32 grc_local_ctrl;
2554
2555         if (!tg3_flag(tp, IS_NIC) ||
2556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2558                 return;
2559
2560         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2561
2562         tw32_wait_f(GRC_LOCAL_CTRL,
2563                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2564                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2565
2566         tw32_wait_f(GRC_LOCAL_CTRL,
2567                     grc_local_ctrl,
2568                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2569
2570         tw32_wait_f(GRC_LOCAL_CTRL,
2571                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2572                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 }
2574
2575 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2576 {
2577         if (!tg3_flag(tp, IS_NIC))
2578                 return;
2579
2580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2581             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2582                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2583                             (GRC_LCLCTRL_GPIO_OE0 |
2584                              GRC_LCLCTRL_GPIO_OE1 |
2585                              GRC_LCLCTRL_GPIO_OE2 |
2586                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2587                              GRC_LCLCTRL_GPIO_OUTPUT1),
2588                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2589         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2590                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2591                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2592                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2593                                      GRC_LCLCTRL_GPIO_OE1 |
2594                                      GRC_LCLCTRL_GPIO_OE2 |
2595                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2596                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2597                                      tp->grc_local_ctrl;
2598                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2599                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2600
2601                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2602                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2603                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2604
2605                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2606                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2607                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2608         } else {
2609                 u32 no_gpio2;
2610                 u32 grc_local_ctrl = 0;
2611
2612                 /* Workaround to prevent drawing excess current. */
2613                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2614                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2615                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2616                                     grc_local_ctrl,
2617                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2618                 }
2619
2620                 /* On 5753 and variants, GPIO2 cannot be used. */
2621                 no_gpio2 = tp->nic_sram_data_cfg &
2622                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2623
2624                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2625                                   GRC_LCLCTRL_GPIO_OE1 |
2626                                   GRC_LCLCTRL_GPIO_OE2 |
2627                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2628                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2629                 if (no_gpio2) {
2630                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2631                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2632                 }
2633                 tw32_wait_f(GRC_LOCAL_CTRL,
2634                             tp->grc_local_ctrl | grc_local_ctrl,
2635                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2636
2637                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2638
2639                 tw32_wait_f(GRC_LOCAL_CTRL,
2640                             tp->grc_local_ctrl | grc_local_ctrl,
2641                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2642
2643                 if (!no_gpio2) {
2644                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2645                         tw32_wait_f(GRC_LOCAL_CTRL,
2646                                     tp->grc_local_ctrl | grc_local_ctrl,
2647                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2648                 }
2649         }
2650 }
2651
2652 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2653 {
2654         u32 msg = 0;
2655
2656         /* Serialize power state transitions */
2657         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2658                 return;
2659
2660         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2661                 msg = TG3_GPIO_MSG_NEED_VAUX;
2662
2663         msg = tg3_set_function_status(tp, msg);
2664
2665         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2666                 goto done;
2667
2668         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2669                 tg3_pwrsrc_switch_to_vaux(tp);
2670         else
2671                 tg3_pwrsrc_die_with_vmain(tp);
2672
2673 done:
2674         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 }
2676
2677 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2678 {
2679         bool need_vaux = false;
2680
2681         /* The GPIOs do something completely different on 57765. */
2682         if (!tg3_flag(tp, IS_NIC) ||
2683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2684                 return;
2685
2686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2687             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2689                 tg3_frob_aux_power_5717(tp, include_wol ?
2690                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2691                 return;
2692         }
2693
2694         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2695                 struct net_device *dev_peer;
2696
2697                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2698
2699                 /* remove_one() may have been run on the peer. */
2700                 if (dev_peer) {
2701                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2702
2703                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2704                                 return;
2705
2706                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2707                             tg3_flag(tp_peer, ENABLE_ASF))
2708                                 need_vaux = true;
2709                 }
2710         }
2711
2712         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2713             tg3_flag(tp, ENABLE_ASF))
2714                 need_vaux = true;
2715
2716         if (need_vaux)
2717                 tg3_pwrsrc_switch_to_vaux(tp);
2718         else
2719                 tg3_pwrsrc_die_with_vmain(tp);
2720 }
2721
2722 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2723 {
2724         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2725                 return 1;
2726         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2727                 if (speed != SPEED_10)
2728                         return 1;
2729         } else if (speed == SPEED_10)
2730                 return 1;
2731
2732         return 0;
2733 }
2734
2735 static int tg3_setup_phy(struct tg3 *, int);
2736 static int tg3_halt_cpu(struct tg3 *, u32);
2737
2738 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2739 {
2740         u32 val;
2741
2742         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2743                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2744                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2745                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2746
2747                         sg_dig_ctrl |=
2748                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2749                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2750                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2751                 }
2752                 return;
2753         }
2754
2755         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2756                 tg3_bmcr_reset(tp);
2757                 val = tr32(GRC_MISC_CFG);
2758                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2759                 udelay(40);
2760                 return;
2761         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2762                 u32 phytest;
2763                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2764                         u32 phy;
2765
2766                         tg3_writephy(tp, MII_ADVERTISE, 0);
2767                         tg3_writephy(tp, MII_BMCR,
2768                                      BMCR_ANENABLE | BMCR_ANRESTART);
2769
2770                         tg3_writephy(tp, MII_TG3_FET_TEST,
2771                                      phytest | MII_TG3_FET_SHADOW_EN);
2772                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2773                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2774                                 tg3_writephy(tp,
2775                                              MII_TG3_FET_SHDW_AUXMODE4,
2776                                              phy);
2777                         }
2778                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2779                 }
2780                 return;
2781         } else if (do_low_power) {
2782                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2783                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2784
2785                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2786                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2787                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789         }
2790
2791         /* The PHY should not be powered down on some chips because
2792          * of bugs.
2793          */
2794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2796             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2797              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2798                 return;
2799
2800         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2801             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2802                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2803                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2804                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2805                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806         }
2807
2808         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 }
2810
2811 /* tp->lock is held. */
2812 static int tg3_nvram_lock(struct tg3 *tp)
2813 {
2814         if (tg3_flag(tp, NVRAM)) {
2815                 int i;
2816
2817                 if (tp->nvram_lock_cnt == 0) {
2818                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2819                         for (i = 0; i < 8000; i++) {
2820                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2821                                         break;
2822                                 udelay(20);
2823                         }
2824                         if (i == 8000) {
2825                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2826                                 return -ENODEV;
2827                         }
2828                 }
2829                 tp->nvram_lock_cnt++;
2830         }
2831         return 0;
2832 }
2833
2834 /* tp->lock is held. */
2835 static void tg3_nvram_unlock(struct tg3 *tp)
2836 {
2837         if (tg3_flag(tp, NVRAM)) {
2838                 if (tp->nvram_lock_cnt > 0)
2839                         tp->nvram_lock_cnt--;
2840                 if (tp->nvram_lock_cnt == 0)
2841                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2842         }
2843 }
2844
2845 /* tp->lock is held. */
2846 static void tg3_enable_nvram_access(struct tg3 *tp)
2847 {
2848         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2849                 u32 nvaccess = tr32(NVRAM_ACCESS);
2850
2851                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2852         }
2853 }
2854
2855 /* tp->lock is held. */
2856 static void tg3_disable_nvram_access(struct tg3 *tp)
2857 {
2858         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2859                 u32 nvaccess = tr32(NVRAM_ACCESS);
2860
2861                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2862         }
2863 }
2864
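/* The four helpers above nest via nvram_lock_cnt, and every NVRAM
 * access follows the same bracketing, as tg3_nvram_read() below
 * demonstrates.  A minimal sketch (tp->lock held, as noted above):
 */
static int __maybe_unused tg3_example_nvram_section(struct tg3 *tp)
{
	int ret;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);
	/* ... issue NVRAM commands here ... */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);
	return 0;
}
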
2865 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2866                                         u32 offset, u32 *val)
2867 {
2868         u32 tmp;
2869         int i;
2870
2871         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872                 return -EINVAL;
2873
2874         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2875                                         EEPROM_ADDR_DEVID_MASK |
2876                                         EEPROM_ADDR_READ);
2877         tw32(GRC_EEPROM_ADDR,
2878              tmp |
2879              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2880              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2881               EEPROM_ADDR_ADDR_MASK) |
2882              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2883
2884         for (i = 0; i < 1000; i++) {
2885                 tmp = tr32(GRC_EEPROM_ADDR);
2886
2887                 if (tmp & EEPROM_ADDR_COMPLETE)
2888                         break;
2889                 msleep(1);
2890         }
2891         if (!(tmp & EEPROM_ADDR_COMPLETE))
2892                 return -EBUSY;
2893
2894         tmp = tr32(GRC_EEPROM_DATA);
2895
2896         /*
2897          * The data will always be opposite the native endian
2898          * format.  Perform a blind byteswap to compensate.
2899          */
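	/* For example, swab32(0xa1181000) == 0x001018a1 (values
	 * illustrative only).
	 */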
2900         *val = swab32(tmp);
2901
2902         return 0;
2903 }
2904
2905 #define NVRAM_CMD_TIMEOUT 10000
2906
2907 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2908 {
2909         int i;
2910
2911         tw32(NVRAM_CMD, nvram_cmd);
2912         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2913                 udelay(10);
2914                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2915                         udelay(10);
2916                         break;
2917                 }
2918         }
2919
2920         if (i == NVRAM_CMD_TIMEOUT)
2921                 return -EBUSY;
2922
2923         return 0;
2924 }
2925
2926 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2927 {
2928         if (tg3_flag(tp, NVRAM) &&
2929             tg3_flag(tp, NVRAM_BUFFERED) &&
2930             tg3_flag(tp, FLASH) &&
2931             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932             (tp->nvram_jedecnum == JEDEC_ATMEL))
2933
2934                 addr = ((addr / tp->nvram_pagesize) <<
2935                         ATMEL_AT45DB0X1B_PAGE_POS) +
2936                        (addr % tp->nvram_pagesize);
2937
2938         return addr;
2939 }
2940
2941 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2942 {
2943         if (tg3_flag(tp, NVRAM) &&
2944             tg3_flag(tp, NVRAM_BUFFERED) &&
2945             tg3_flag(tp, FLASH) &&
2946             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2947             (tp->nvram_jedecnum == JEDEC_ATMEL))
2948
2949                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2950                         tp->nvram_pagesize) +
2951                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2952
2953         return addr;
2954 }
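
/* Worked example (values assumed for illustration): with a 264-byte
 * page size and ATMEL_AT45DB0X1B_PAGE_POS == 9, linear address 530
 * falls in page 2 at offset 2, so tg3_nvram_phys_addr() returns
 * (2 << 9) + 2 = 1026, and tg3_nvram_logical_addr() maps 1026 back to
 * (2 * 264) + 2 = 530.
 */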
2955
2956 /* NOTE: Data read in from NVRAM is byteswapped according to
2957  * the byteswapping settings for all other register accesses.
2958  * tg3 devices are BE devices, so on a BE machine, the data
2959  * returned will be exactly as it is seen in NVRAM.  On a LE
2960  * machine, the 32-bit value will be byteswapped.
2961  */
2962 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2963 {
2964         int ret;
2965
2966         if (!tg3_flag(tp, NVRAM))
2967                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2968
2969         offset = tg3_nvram_phys_addr(tp, offset);
2970
2971         if (offset > NVRAM_ADDR_MSK)
2972                 return -EINVAL;
2973
2974         ret = tg3_nvram_lock(tp);
2975         if (ret)
2976                 return ret;
2977
2978         tg3_enable_nvram_access(tp);
2979
2980         tw32(NVRAM_ADDR, offset);
2981         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2982                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2983
2984         if (ret == 0)
2985                 *val = tr32(NVRAM_RDDATA);
2986
2987         tg3_disable_nvram_access(tp);
2988
2989         tg3_nvram_unlock(tp);
2990
2991         return ret;
2992 }
2993
2994 /* Ensures NVRAM data is in bytestream format. */
2995 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2996 {
2997         u32 v;
2998         int res = tg3_nvram_read(tp, offset, &v);
2999         if (!res)
3000                 *val = cpu_to_be32(v);
3001         return res;
3002 }
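
/* Usage sketch (a hypothetical helper, not part of the driver):
 * pulling a byte stream out of NVRAM one word at a time.  Because
 * tg3_nvram_read_be32() returns big-endian words, the destination
 * buffer ends up in bytestream order on both LE and BE hosts.  Assumes
 * len is a multiple of 4 and that the caller holds tp->lock.
 */
static int __maybe_unused tg3_nvram_read_block(struct tg3 *tp, u32 offset,
					       u8 *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i += sizeof(__be32)) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, sizeof(v));
	}
	return 0;
}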
3003
3004 #define RX_CPU_SCRATCH_BASE     0x30000
3005 #define RX_CPU_SCRATCH_SIZE     0x04000
3006 #define TX_CPU_SCRATCH_BASE     0x34000
3007 #define TX_CPU_SCRATCH_SIZE     0x04000
3008
3009 /* tp->lock is held. */
3010 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3011 {
3012         int i;
3013
3014         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3015
3016         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3017                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3018
3019                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3020                 return 0;
3021         }
3022         if (offset == RX_CPU_BASE) {
3023                 for (i = 0; i < 10000; i++) {
3024                         tw32(offset + CPU_STATE, 0xffffffff);
3025                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3026                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3027                                 break;
3028                 }
3029
3030                 tw32(offset + CPU_STATE, 0xffffffff);
3031                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3032                 udelay(10);
3033         } else {
3034                 for (i = 0; i < 10000; i++) {
3035                         tw32(offset + CPU_STATE, 0xffffffff);
3036                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3037                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3038                                 break;
3039                 }
3040         }
3041
3042         if (i >= 10000) {
3043                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3044                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3045                 return -ENODEV;
3046         }
3047
3048         /* Clear firmware's nvram arbitration. */
3049         if (tg3_flag(tp, NVRAM))
3050                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3051         return 0;
3052 }
3053
3054 struct fw_info {
3055         unsigned int fw_base;
3056         unsigned int fw_len;
3057         const __be32 *fw_data;
3058 };
3059
3060 /* tp->lock is held. */
3061 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3062                                  u32 cpu_scratch_base, int cpu_scratch_size,
3063                                  struct fw_info *info)
3064 {
3065         int err, lock_err, i;
3066         void (*write_op)(struct tg3 *, u32, u32);
3067
3068         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3069                 netdev_err(tp->dev,
3070                            "%s: trying to load TX CPU firmware on a 5705-class device\n",
3071                            __func__);
3072                 return -EINVAL;
3073         }
3074
3075         if (tg3_flag(tp, 5705_PLUS))
3076                 write_op = tg3_write_mem;
3077         else
3078                 write_op = tg3_write_indirect_reg32;
3079
3080         /* It is possible that bootcode is still loading at this point.
3081          * Get the NVRAM lock before halting the CPU.
3082          */
3083         lock_err = tg3_nvram_lock(tp);
3084         err = tg3_halt_cpu(tp, cpu_base);
3085         if (!lock_err)
3086                 tg3_nvram_unlock(tp);
3087         if (err)
3088                 goto out;
3089
3090         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3091                 write_op(tp, cpu_scratch_base + i, 0);
3092         tw32(cpu_base + CPU_STATE, 0xffffffff);
3093         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3094         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3095                 write_op(tp, (cpu_scratch_base +
3096                               (info->fw_base & 0xffff) +
3097                               (i * sizeof(u32))),
3098                               be32_to_cpu(info->fw_data[i]));
3099
3100         err = 0;
3101
3102 out:
3103         return err;
3104 }
3105
3106 /* tp->lock is held. */
3107 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3108 {
3109         struct fw_info info;
3110         const __be32 *fw_data;
3111         int err, i;
3112
3113         fw_data = (void *)tp->fw->data;
3114
3115         /* Firmware blob starts with version numbers, followed by
3116          * start address and length.  We are setting complete length.
3117          * length = end_address_of_bss - start_address_of_text.
3118          * Remainder is the blob to be loaded contiguously
3119          * from start address. */
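	/* Concretely (layout implied by the code below): fw_data[0] holds
	 * the firmware version, fw_data[1] the load address, fw_data[2]
	 * the advertised length, and fw_data[3] the first word of the
	 * image; the 12 bytes subtracted from tp->fw->size skip those
	 * three header words.
	 */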
3120
3121         info.fw_base = be32_to_cpu(fw_data[1]);
3122         info.fw_len = tp->fw->size - 12;
3123         info.fw_data = &fw_data[3];
3124
3125         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3126                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3127                                     &info);
3128         if (err)
3129                 return err;
3130
3131         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3132                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3133                                     &info);
3134         if (err)
3135                 return err;
3136
3137         /* Now start up only the RX CPU. */
3138         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3139         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3140
3141         for (i = 0; i < 5; i++) {
3142                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3143                         break;
3144                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3145                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3146                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3147                 udelay(1000);
3148         }
3149         if (i >= 5) {
3150                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3151                            "should be %08x\n", __func__,
3152                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3153                 return -ENODEV;
3154         }
3155         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3156         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3157
3158         return 0;
3159 }
3160
3161 /* tp->lock is held. */
3162 static int tg3_load_tso_firmware(struct tg3 *tp)
3163 {
3164         struct fw_info info;
3165         const __be32 *fw_data;
3166         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3167         int err, i;
3168
3169         if (tg3_flag(tp, HW_TSO_1) ||
3170             tg3_flag(tp, HW_TSO_2) ||
3171             tg3_flag(tp, HW_TSO_3))
3172                 return 0;
3173
3174         fw_data = (void *)tp->fw->data;
3175
3176         /* Firmware blob starts with version numbers, followed by
3177          * start address and length.  We are setting complete length.
3178          * length = end_address_of_bss - start_address_of_text.
3179          * Remainder is the blob to be loaded contiguously
3180          * from start address. */
3181
3182         info.fw_base = be32_to_cpu(fw_data[1]);
3183         cpu_scratch_size = tp->fw_len;
3184         info.fw_len = tp->fw->size - 12;
3185         info.fw_data = &fw_data[3];
3186
3187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3188                 cpu_base = RX_CPU_BASE;
3189                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3190         } else {
3191                 cpu_base = TX_CPU_BASE;
3192                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3193                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194         }
3195
3196         err = tg3_load_firmware_cpu(tp, cpu_base,
3197                                     cpu_scratch_base, cpu_scratch_size,
3198                                     &info);
3199         if (err)
3200                 return err;
3201
3202         /* Now start up the CPU. */
3203         tw32(cpu_base + CPU_STATE, 0xffffffff);
3204         tw32_f(cpu_base + CPU_PC, info.fw_base);
3205
3206         for (i = 0; i < 5; i++) {
3207                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3208                         break;
3209                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3210                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3211                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3212                 udelay(1000);
3213         }
3214         if (i >= 5) {
3215                 netdev_err(tp->dev,
3216                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3217                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3218                 return -ENODEV;
3219         }
3220         tw32(cpu_base + CPU_STATE, 0xffffffff);
3221         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3222         return 0;
3223 }
3224
3226 /* tp->lock is held. */
3227 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3228 {
3229         u32 addr_high, addr_low;
3230         int i;
3231
3232         addr_high = ((tp->dev->dev_addr[0] << 8) |
3233                      tp->dev->dev_addr[1]);
3234         addr_low = ((tp->dev->dev_addr[2] << 24) |
3235                     (tp->dev->dev_addr[3] << 16) |
3236                     (tp->dev->dev_addr[4] <<  8) |
3237                     (tp->dev->dev_addr[5] <<  0));
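	/* e.g. a (hypothetical) address of 00:10:18:a1:b2:c3 packs as
	 * addr_high = 0x00000010 and addr_low = 0x18a1b2c3.
	 */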
3238         for (i = 0; i < 4; i++) {
3239                 if (i == 1 && skip_mac_1)
3240                         continue;
3241                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3242                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243         }
3244
3245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3247                 for (i = 0; i < 12; i++) {
3248                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3249                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3250                 }
3251         }
3252
3253         addr_high = (tp->dev->dev_addr[0] +
3254                      tp->dev->dev_addr[1] +
3255                      tp->dev->dev_addr[2] +
3256                      tp->dev->dev_addr[3] +
3257                      tp->dev->dev_addr[4] +
3258                      tp->dev->dev_addr[5]) &
3259                 TX_BACKOFF_SEED_MASK;
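	/* e.g. the bytes of the hypothetical address above sum to 0x23e,
	 * which is then masked down to form the seed.
	 */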
3260         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3261 }
3262
3263 static void tg3_enable_register_access(struct tg3 *tp)
3264 {
3265         /*
3266          * Make sure register accesses (indirect or otherwise) will function
3267          * correctly.
3268          */
3269         pci_write_config_dword(tp->pdev,
3270                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 }
3272
3273 static int tg3_power_up(struct tg3 *tp)
3274 {
3275         int err;
3276
3277         tg3_enable_register_access(tp);
3278
3279         err = pci_set_power_state(tp->pdev, PCI_D0);
3280         if (!err) {
3281                 /* Switch out of Vaux if it is a NIC */
3282                 tg3_pwrsrc_switch_to_vmain(tp);
3283         } else {
3284                 netdev_err(tp->dev, "Transition to D0 failed\n");
3285         }
3286
3287         return err;
3288 }
3289
3290 static int tg3_power_down_prepare(struct tg3 *tp)
3291 {
3292         u32 misc_host_ctrl;
3293         bool device_should_wake, do_low_power;
3294
3295         tg3_enable_register_access(tp);
3296
3297         /* Restore the CLKREQ setting. */
3298         if (tg3_flag(tp, CLKREQ_BUG)) {
3299                 u16 lnkctl;
3300
3301                 pci_read_config_word(tp->pdev,
3302                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3303                                      &lnkctl);
3304                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3305                 pci_write_config_word(tp->pdev,
3306                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3307                                       lnkctl);
3308         }
3309
3310         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3311         tw32(TG3PCI_MISC_HOST_CTRL,
3312              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3313
3314         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3315                              tg3_flag(tp, WOL_ENABLE);
3316
3317         if (tg3_flag(tp, USE_PHYLIB)) {
3318                 do_low_power = false;
3319                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3320                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3321                         struct phy_device *phydev;
3322                         u32 phyid, advertising;
3323
3324                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3325
3326                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3327
3328                         tp->link_config.orig_speed = phydev->speed;
3329                         tp->link_config.orig_duplex = phydev->duplex;
3330                         tp->link_config.orig_autoneg = phydev->autoneg;
3331                         tp->link_config.orig_advertising = phydev->advertising;
3332
3333                         advertising = ADVERTISED_TP |
3334                                       ADVERTISED_Pause |
3335                                       ADVERTISED_Autoneg |
3336                                       ADVERTISED_10baseT_Half;
3337
3338                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3339                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3340                                         advertising |=
3341                                                 ADVERTISED_100baseT_Half |
3342                                                 ADVERTISED_100baseT_Full |
3343                                                 ADVERTISED_10baseT_Full;
3344                                 else
3345                                         advertising |= ADVERTISED_10baseT_Full;
3346                         }
3347
3348                         phydev->advertising = advertising;
3349
3350                         phy_start_aneg(phydev);
3351
3352                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3353                         if (phyid != PHY_ID_BCMAC131) {
3354                                 phyid &= PHY_BCM_OUI_MASK;
3355                                 if (phyid == PHY_BCM_OUI_1 ||
3356                                     phyid == PHY_BCM_OUI_2 ||
3357                                     phyid == PHY_BCM_OUI_3)
3358                                         do_low_power = true;
3359                         }
3360                 }
3361         } else {
3362                 do_low_power = true;
3363
3364                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3365                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3366                         tp->link_config.orig_speed = tp->link_config.speed;
3367                         tp->link_config.orig_duplex = tp->link_config.duplex;
3368                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369                 }
3370
3371                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3372                         tp->link_config.speed = SPEED_10;
3373                         tp->link_config.duplex = DUPLEX_HALF;
3374                         tp->link_config.autoneg = AUTONEG_ENABLE;
3375                         tg3_setup_phy(tp, 0);
3376                 }
3377         }
3378
3379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3380                 u32 val;
3381
3382                 val = tr32(GRC_VCPU_EXT_CTRL);
3383                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3384         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3385                 int i;
3386                 u32 val;
3387
3388                 for (i = 0; i < 200; i++) {
3389                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3390                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3391                                 break;
3392                         msleep(1);
3393                 }
3394         }
3395         if (tg3_flag(tp, WOL_CAP))
3396                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3397                                                      WOL_DRV_STATE_SHUTDOWN |
3398                                                      WOL_DRV_WOL |
3399                                                      WOL_SET_MAGIC_PKT);
3400
3401         if (device_should_wake) {
3402                 u32 mac_mode;
3403
3404                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3405                         if (do_low_power &&
3406                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3407                                 tg3_phy_auxctl_write(tp,
3408                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3409                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3410                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3411                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3412                                 udelay(40);
3413                         }
3414
3415                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3416                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3417                         else
3418                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3419
3420                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3421                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3422                             ASIC_REV_5700) {
3423                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3424                                              SPEED_100 : SPEED_10;
3425                                 if (tg3_5700_link_polarity(tp, speed))
3426                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3427                                 else
3428                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3429                         }
3430                 } else {
3431                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3432                 }
3433
3434                 if (!tg3_flag(tp, 5750_PLUS))
3435                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3436
3437                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3438                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3439                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3440                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3441
3442                 if (tg3_flag(tp, ENABLE_APE))
3443                         mac_mode |= MAC_MODE_APE_TX_EN |
3444                                     MAC_MODE_APE_RX_EN |
3445                                     MAC_MODE_TDE_ENABLE;
3446
3447                 tw32_f(MAC_MODE, mac_mode);
3448                 udelay(100);
3449
3450                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3451                 udelay(10);
3452         }
3453
3454         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3455             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3456              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3457                 u32 base_val;
3458
3459                 base_val = tp->pci_clock_ctrl;
3460                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3461                              CLOCK_CTRL_TXCLK_DISABLE);
3462
3463                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3464                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3465         } else if (tg3_flag(tp, 5780_CLASS) ||
3466                    tg3_flag(tp, CPMU_PRESENT) ||
3467                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3468                 /* do nothing */
3469         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3470                 u32 newbits1, newbits2;
3471
3472                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3475                                     CLOCK_CTRL_TXCLK_DISABLE |
3476                                     CLOCK_CTRL_ALTCLK);
3477                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3478                 } else if (tg3_flag(tp, 5705_PLUS)) {
3479                         newbits1 = CLOCK_CTRL_625_CORE;
3480                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3481                 } else {
3482                         newbits1 = CLOCK_CTRL_ALTCLK;
3483                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484                 }
3485
3486                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3487                             40);
3488
3489                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3490                             40);
3491
3492                 if (!tg3_flag(tp, 5705_PLUS)) {
3493                         u32 newbits3;
3494
3495                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3496                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3497                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3498                                             CLOCK_CTRL_TXCLK_DISABLE |
3499                                             CLOCK_CTRL_44MHZ_CORE);
3500                         } else {
3501                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502                         }
3503
3504                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3505                                     tp->pci_clock_ctrl | newbits3, 40);
3506                 }
3507         }
3508
3509         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3510                 tg3_power_down_phy(tp, do_low_power);
3511
3512         tg3_frob_aux_power(tp, true);
3513
3514         /* Workaround for unstable PLL clock */
3515         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3516             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3517                 u32 val = tr32(0x7d00);
3518
3519                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3520                 tw32(0x7d00, val);
3521                 if (!tg3_flag(tp, ENABLE_ASF)) {
3522                         int err;
3523
3524                         err = tg3_nvram_lock(tp);
3525                         tg3_halt_cpu(tp, RX_CPU_BASE);
3526                         if (!err)
3527                                 tg3_nvram_unlock(tp);
3528                 }
3529         }
3530
3531         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3532
3533         return 0;
3534 }
3535
3536 static void tg3_power_down(struct tg3 *tp)
3537 {
3538         tg3_power_down_prepare(tp);
3539
3540         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3541         pci_set_power_state(tp->pdev, PCI_D3hot);
3542 }
3543
3544 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3545 {
3546         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3547         case MII_TG3_AUX_STAT_10HALF:
3548                 *speed = SPEED_10;
3549                 *duplex = DUPLEX_HALF;
3550                 break;
3551
3552         case MII_TG3_AUX_STAT_10FULL:
3553                 *speed = SPEED_10;
3554                 *duplex = DUPLEX_FULL;
3555                 break;
3556
3557         case MII_TG3_AUX_STAT_100HALF:
3558                 *speed = SPEED_100;
3559                 *duplex = DUPLEX_HALF;
3560                 break;
3561
3562         case MII_TG3_AUX_STAT_100FULL:
3563                 *speed = SPEED_100;
3564                 *duplex = DUPLEX_FULL;
3565                 break;
3566
3567         case MII_TG3_AUX_STAT_1000HALF:
3568                 *speed = SPEED_1000;
3569                 *duplex = DUPLEX_HALF;
3570                 break;
3571
3572         case MII_TG3_AUX_STAT_1000FULL:
3573                 *speed = SPEED_1000;
3574                 *duplex = DUPLEX_FULL;
3575                 break;
3576
3577         default:
3578                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3579                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3580                                  SPEED_10;
3581                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3582                                   DUPLEX_HALF;
3583                         break;
3584                 }
3585                 *speed = SPEED_INVALID;
3586                 *duplex = DUPLEX_INVALID;
3587                 break;
3588         }
3589 }
3590
3591 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3592 {
3593         int err = 0;
3594         u32 val, new_adv;
3595
3596         new_adv = ADVERTISE_CSMA;
3597         if (advertise & ADVERTISED_10baseT_Half)
3598                 new_adv |= ADVERTISE_10HALF;
3599         if (advertise & ADVERTISED_10baseT_Full)
3600                 new_adv |= ADVERTISE_10FULL;
3601         if (advertise & ADVERTISED_100baseT_Half)
3602                 new_adv |= ADVERTISE_100HALF;
3603         if (advertise & ADVERTISED_100baseT_Full)
3604                 new_adv |= ADVERTISE_100FULL;
3605
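	/* tg3_advert_flowctrl_1000T() translates the FLOW_CTRL_* bits into
	 * the standard pause advertisement; e.g. FLOW_CTRL_TX | FLOW_CTRL_RX
	 * is expected to map to ADVERTISE_PAUSE_CAP (symmetric pause).
	 */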
3606         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3607
3608         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3609         if (err)
3610                 goto done;
3611
3612         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3613                 goto done;
3614
3615         new_adv = 0;
3616         if (advertise & ADVERTISED_1000baseT_Half)
3617                 new_adv |= ADVERTISE_1000HALF;
3618         if (advertise & ADVERTISED_1000baseT_Full)
3619                 new_adv |= ADVERTISE_1000FULL;
3620
3621         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3622             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3623                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3624
3625         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3626         if (err)
3627                 goto done;
3628
3629         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3630                 goto done;
3631
3632         tw32(TG3_CPMU_EEE_MODE,
3633              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3634
3635         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3636         if (!err) {
3637                 u32 err2;
3638
3639                 val = 0;
3640                 /* Advertise 100-BaseTX EEE ability */
3641                 if (advertise & ADVERTISED_100baseT_Full)
3642                         val |= MDIO_AN_EEE_ADV_100TX;
3643                 /* Advertise 1000-BaseT EEE ability */
3644                 if (advertise & ADVERTISED_1000baseT_Full)
3645                         val |= MDIO_AN_EEE_ADV_1000T;
3646                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3647                 if (err)
3648                         val = 0;
3649
3650                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3651                 case ASIC_REV_5717:
3652                 case ASIC_REV_57765:
3653                 case ASIC_REV_5719:
3654                         /* If we advertised any EEE abilities above... */
3655                         if (val)
3656                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3657                                       MII_TG3_DSP_TAP26_RMRXSTO |
3658                                       MII_TG3_DSP_TAP26_OPCSINPT;
3659                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3660                         /* Fall through */
3661                 case ASIC_REV_5720:
3662                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3663                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3664                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3665                 }
3666
3667                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3668                 if (!err)
3669                         err = err2;
3670         }
3671
3672 done:
3673         return err;
3674 }
3675
3676 static void tg3_phy_copper_begin(struct tg3 *tp)
3677 {
3678         u32 new_adv;
3679         int i;
3680
3681         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3682                 new_adv = ADVERTISED_10baseT_Half |
3683                           ADVERTISED_10baseT_Full;
3684                 if (tg3_flag(tp, WOL_SPEED_100MB))
3685                         new_adv |= ADVERTISED_100baseT_Half |
3686                                    ADVERTISED_100baseT_Full;
3687
3688                 tg3_phy_autoneg_cfg(tp, new_adv,
3689                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3690         } else if (tp->link_config.speed == SPEED_INVALID) {
3691                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3692                         tp->link_config.advertising &=
3693                                 ~(ADVERTISED_1000baseT_Half |
3694                                   ADVERTISED_1000baseT_Full);
3695
3696                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3697                                     tp->link_config.flowctrl);
3698         } else {
3699                 /* Asking for a specific link mode. */
3700                 if (tp->link_config.speed == SPEED_1000) {
3701                         if (tp->link_config.duplex == DUPLEX_FULL)
3702                                 new_adv = ADVERTISED_1000baseT_Full;
3703                         else
3704                                 new_adv = ADVERTISED_1000baseT_Half;
3705                 } else if (tp->link_config.speed == SPEED_100) {
3706                         if (tp->link_config.duplex == DUPLEX_FULL)
3707                                 new_adv = ADVERTISED_100baseT_Full;
3708                         else
3709                                 new_adv = ADVERTISED_100baseT_Half;
3710                 } else {
3711                         if (tp->link_config.duplex == DUPLEX_FULL)
3712                                 new_adv = ADVERTISED_10baseT_Full;
3713                         else
3714                                 new_adv = ADVERTISED_10baseT_Half;
3715                 }
3716
3717                 tg3_phy_autoneg_cfg(tp, new_adv,
3718                                     tp->link_config.flowctrl);
3719         }
3720
3721         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3722             tp->link_config.speed != SPEED_INVALID) {
3723                 u32 bmcr, orig_bmcr;
3724
3725                 tp->link_config.active_speed = tp->link_config.speed;
3726                 tp->link_config.active_duplex = tp->link_config.duplex;
3727
3728                 bmcr = 0;
3729                 switch (tp->link_config.speed) {
3730                 default:
3731                 case SPEED_10:
3732                         break;
3733
3734                 case SPEED_100:
3735                         bmcr |= BMCR_SPEED100;
3736                         break;
3737
3738                 case SPEED_1000:
3739                         bmcr |= BMCR_SPEED1000;
3740                         break;
3741                 }
3742
3743                 if (tp->link_config.duplex == DUPLEX_FULL)
3744                         bmcr |= BMCR_FULLDPLX;
3745
3746                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3747                     (bmcr != orig_bmcr)) {
3748                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3749                         for (i = 0; i < 1500; i++) {
3750                                 u32 tmp;
3751
3752                                 udelay(10);
3753                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3754                                     tg3_readphy(tp, MII_BMSR, &tmp))
3755                                         continue;
3756                                 if (!(tmp & BMSR_LSTATUS)) {
3757                                         udelay(40);
3758                                         break;
3759                                 }
3760                         }
3761                         tg3_writephy(tp, MII_BMCR, bmcr);
3762                         udelay(40);
3763                 }
3764         } else {
3765                 tg3_writephy(tp, MII_BMCR,
3766                              BMCR_ANENABLE | BMCR_ANRESTART);
3767         }
3768 }
3769
3770 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3771 {
3772         int err;
3773
3774         /* Turn off tap power management. */
3775         /* Set the extended packet length bit. */
3776         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3777
3778         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3779         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3780         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3781         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3782         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3783
3784         udelay(40);
3785
3786         return err;
3787 }
3788
3789 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3790 {
3791         u32 adv_reg, all_mask = 0;
3792
3793         if (mask & ADVERTISED_10baseT_Half)
3794                 all_mask |= ADVERTISE_10HALF;
3795         if (mask & ADVERTISED_10baseT_Full)
3796                 all_mask |= ADVERTISE_10FULL;
3797         if (mask & ADVERTISED_100baseT_Half)
3798                 all_mask |= ADVERTISE_100HALF;
3799         if (mask & ADVERTISED_100baseT_Full)
3800                 all_mask |= ADVERTISE_100FULL;
3801
3802         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3803                 return 0;
3804
3805         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3806                 return 0;
3807
3808         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3809                 u32 tg3_ctrl;
3810
3811                 all_mask = 0;
3812                 if (mask & ADVERTISED_1000baseT_Half)
3813                         all_mask |= ADVERTISE_1000HALF;
3814                 if (mask & ADVERTISED_1000baseT_Full)
3815                         all_mask |= ADVERTISE_1000FULL;
3816
3817                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3818                         return 0;
3819
3820                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3821                 if (tg3_ctrl != all_mask)
3822                         return 0;
3823         }
3824
3825         return 1;
3826 }
3827
3828 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3829 {
3830         u32 curadv, reqadv;
3831
3832         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3833                 return 1;
3834
3835         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3836         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3837
3838         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3839                 if (curadv != reqadv)
3840                         return 0;
3841
3842                 if (tg3_flag(tp, PAUSE_AUTONEG))
3843                         tg3_readphy(tp, MII_LPA, rmtadv);
3844         } else {
3845                 /* Reprogram the advertisement register, even if it
3846                  * does not affect the current link.  If the link
3847                  * gets renegotiated in the future, we can save an
3848                  * additional renegotiation cycle by advertising
3849                  * it correctly in the first place.
3850                  */
3851                 if (curadv != reqadv) {
3852                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3853                                      ADVERTISE_PAUSE_ASYM);
3854                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3855                 }
3856         }
3857
3858         return 1;
3859 }
3860
3861 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3862 {
3863         int current_link_up;
3864         u32 bmsr, val;
3865         u32 lcl_adv, rmt_adv;
3866         u16 current_speed;
3867         u8 current_duplex;
3868         int i, err;
3869
3870         tw32(MAC_EVENT, 0);
3871
3872         tw32_f(MAC_STATUS,
3873              (MAC_STATUS_SYNC_CHANGED |
3874               MAC_STATUS_CFG_CHANGED |
3875               MAC_STATUS_MI_COMPLETION |
3876               MAC_STATUS_LNKSTATE_CHANGED));
3877         udelay(40);
3878
3879         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3880                 tw32_f(MAC_MI_MODE,
3881                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3882                 udelay(80);
3883         }
3884
3885         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3886
3887         /* Some third-party PHYs need to be reset on link going
3888          * down.
3889          */
3890         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3891              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3892              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3893             netif_carrier_ok(tp->dev)) {
3894                 tg3_readphy(tp, MII_BMSR, &bmsr);
3895                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3896                     !(bmsr & BMSR_LSTATUS))
3897                         force_reset = 1;
3898         }
3899         if (force_reset)
3900                 tg3_phy_reset(tp);
3901
3902         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3903                 tg3_readphy(tp, MII_BMSR, &bmsr);
3904                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3905                     !tg3_flag(tp, INIT_COMPLETE))
3906                         bmsr = 0;
3907
3908                 if (!(bmsr & BMSR_LSTATUS)) {
3909                         err = tg3_init_5401phy_dsp(tp);
3910                         if (err)
3911                                 return err;
3912
3913                         tg3_readphy(tp, MII_BMSR, &bmsr);
3914                         for (i = 0; i < 1000; i++) {
3915                                 udelay(10);
3916                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3917                                     (bmsr & BMSR_LSTATUS)) {
3918                                         udelay(40);
3919                                         break;
3920                                 }
3921                         }
3922
3923                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3924                             TG3_PHY_REV_BCM5401_B0 &&
3925                             !(bmsr & BMSR_LSTATUS) &&
3926                             tp->link_config.active_speed == SPEED_1000) {
3927                                 err = tg3_phy_reset(tp);
3928                                 if (!err)
3929                                         err = tg3_init_5401phy_dsp(tp);
3930                                 if (err)
3931                                         return err;
3932                         }
3933                 }
3934         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3935                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3936                 /* 5701 {A0,B0} CRC bug workaround */
3937                 tg3_writephy(tp, 0x15, 0x0a75);
3938                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3940                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941         }
3942
3943         /* Clear pending interrupts... */
3944         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3946
3947         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3948                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3949         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3950                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3951
3952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3954                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3955                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3956                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3957                 else
3958                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959         }
3960
3961         current_link_up = 0;
3962         current_speed = SPEED_INVALID;
3963         current_duplex = DUPLEX_INVALID;
3964
3965         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3966                 err = tg3_phy_auxctl_read(tp,
3967                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3968                                           &val);
3969                 if (!err && !(val & (1 << 10))) {
3970                         tg3_phy_auxctl_write(tp,
3971                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3972                                              val | (1 << 10));
3973                         goto relink;
3974                 }
3975         }
3976
3977         bmsr = 0;
3978         for (i = 0; i < 100; i++) {
3979                 tg3_readphy(tp, MII_BMSR, &bmsr);
3980                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3981                     (bmsr & BMSR_LSTATUS))
3982                         break;
3983                 udelay(40);
3984         }
3985
3986         if (bmsr & BMSR_LSTATUS) {
3987                 u32 aux_stat, bmcr;
3988
3989                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3990                 for (i = 0; i < 2000; i++) {
3991                         udelay(10);
3992                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3993                             aux_stat)
3994                                 break;
3995                 }
3996
3997                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3998                                              &current_speed,
3999                                              &current_duplex);
4000
4001                 bmcr = 0;
4002                 for (i = 0; i < 200; i++) {
4003                         tg3_readphy(tp, MII_BMCR, &bmcr);
4004                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4005                                 continue;
4006                         if (bmcr && bmcr != 0x7fff)
4007                                 break;
4008                         udelay(10);
4009                 }
4010
4011                 lcl_adv = 0;
4012                 rmt_adv = 0;
4013
4014                 tp->link_config.active_speed = current_speed;
4015                 tp->link_config.active_duplex = current_duplex;
4016
4017                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4018                         if ((bmcr & BMCR_ANENABLE) &&
4019                             tg3_copper_is_advertising_all(tp,
4020                                                 tp->link_config.advertising)) {
4021                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4022                                                                   &rmt_adv))
4023                                         current_link_up = 1;
4024                         }
4025                 } else {
4026                         if (!(bmcr & BMCR_ANENABLE) &&
4027                             tp->link_config.speed == current_speed &&
4028                             tp->link_config.duplex == current_duplex &&
4029                             tp->link_config.flowctrl ==
4030                             tp->link_config.active_flowctrl) {
4031                                 current_link_up = 1;
4032                         }
4033                 }
4034
4035                 if (current_link_up == 1 &&
4036                     tp->link_config.active_duplex == DUPLEX_FULL)
4037                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4038         }
4039
4040 relink:
4041         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4042                 tg3_phy_copper_begin(tp);
4043
4044                 tg3_readphy(tp, MII_BMSR, &bmsr);
4045                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4046                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4047                         current_link_up = 1;
4048         }
4049
4050         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4051         if (current_link_up == 1) {
4052                 if (tp->link_config.active_speed == SPEED_100 ||
4053                     tp->link_config.active_speed == SPEED_10)
4054                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4055                 else
4056                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4057         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4058                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4059         else
4060                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4061
4062         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4063         if (tp->link_config.active_duplex == DUPLEX_HALF)
4064                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4065
4066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4067                 if (current_link_up == 1 &&
4068                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4069                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4070                 else
4071                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072         }
4073
4074         /* ??? Without this setting Netgear GA302T PHY does not
4075          * ??? send/receive packets...
4076          */
4077         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4078             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4079                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4080                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4081                 udelay(80);
4082         }
4083
4084         tw32_f(MAC_MODE, tp->mac_mode);
4085         udelay(40);
4086
4087         tg3_phy_eee_adjust(tp, current_link_up);
4088
4089         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4090                 /* Polled via timer. */
4091                 tw32_f(MAC_EVENT, 0);
4092         } else {
4093                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4094         }
4095         udelay(40);
4096
4097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4098             current_link_up == 1 &&
4099             tp->link_config.active_speed == SPEED_1000 &&
4100             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4101                 udelay(120);
4102                 tw32_f(MAC_STATUS,
4103                      (MAC_STATUS_SYNC_CHANGED |
4104                       MAC_STATUS_CFG_CHANGED));
4105                 udelay(40);
4106                 tg3_write_mem(tp,
4107                               NIC_SRAM_FIRMWARE_MBOX,
4108                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109         }
4110
4111         /* Prevent send BD corruption. */
4112         if (tg3_flag(tp, CLKREQ_BUG)) {
4113                 u16 oldlnkctl, newlnkctl;
4114
4115                 pci_read_config_word(tp->pdev,
4116                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4117                                      &oldlnkctl);
4118                 if (tp->link_config.active_speed == SPEED_100 ||
4119                     tp->link_config.active_speed == SPEED_10)
4120                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4121                 else
4122                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4123                 if (newlnkctl != oldlnkctl)
4124                         pci_write_config_word(tp->pdev,
4125                                               pci_pcie_cap(tp->pdev) +
4126                                               PCI_EXP_LNKCTL, newlnkctl);
4127         }
4128
4129         if (current_link_up != netif_carrier_ok(tp->dev)) {
4130                 if (current_link_up)
4131                         netif_carrier_on(tp->dev);
4132                 else
4133                         netif_carrier_off(tp->dev);
4134                 tg3_link_report(tp);
4135         }
4136
4137         return 0;
4138 }
4139
4140 struct tg3_fiber_aneginfo {
4141         int state;
4142 #define ANEG_STATE_UNKNOWN              0
4143 #define ANEG_STATE_AN_ENABLE            1
4144 #define ANEG_STATE_RESTART_INIT         2
4145 #define ANEG_STATE_RESTART              3
4146 #define ANEG_STATE_DISABLE_LINK_OK      4
4147 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4148 #define ANEG_STATE_ABILITY_DETECT       6
4149 #define ANEG_STATE_ACK_DETECT_INIT      7
4150 #define ANEG_STATE_ACK_DETECT           8
4151 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4152 #define ANEG_STATE_COMPLETE_ACK         10
4153 #define ANEG_STATE_IDLE_DETECT_INIT     11
4154 #define ANEG_STATE_IDLE_DETECT          12
4155 #define ANEG_STATE_LINK_OK              13
4156 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4157 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4158
4159         u32 flags;
4160 #define MR_AN_ENABLE            0x00000001
4161 #define MR_RESTART_AN           0x00000002
4162 #define MR_AN_COMPLETE          0x00000004
4163 #define MR_PAGE_RX              0x00000008
4164 #define MR_NP_LOADED            0x00000010
4165 #define MR_TOGGLE_TX            0x00000020
4166 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4167 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4168 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4169 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4170 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4171 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4172 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4173 #define MR_TOGGLE_RX            0x00002000
4174 #define MR_NP_RX                0x00004000
4175
4176 #define MR_LINK_OK              0x80000000
4177
4178         unsigned long link_time, cur_time;
4179
4180         u32 ability_match_cfg;
4181         int ability_match_count;
4182
4183         char ability_match, idle_match, ack_match;
4184
4185         u32 txconfig, rxconfig;
4186 #define ANEG_CFG_NP             0x00000080
4187 #define ANEG_CFG_ACK            0x00000040
4188 #define ANEG_CFG_RF2            0x00000020
4189 #define ANEG_CFG_RF1            0x00000010
4190 #define ANEG_CFG_PS2            0x00000001
4191 #define ANEG_CFG_PS1            0x00008000
4192 #define ANEG_CFG_HD             0x00004000
4193 #define ANEG_CFG_FD             0x00002000
4194 #define ANEG_CFG_INVAL          0x00001f06
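/* Example (illustrative): a full-duplex ability word with symmetric
 * pause and the ACK bit set is
 * ANEG_CFG_FD | ANEG_CFG_PS1 | ANEG_CFG_ACK == 0x0000a040.
 */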
4195
4196 };
4197 #define ANEG_OK         0
4198 #define ANEG_DONE       1
4199 #define ANEG_TIMER_ENAB 2
4200 #define ANEG_FAILED     -1
4201
4202 #define ANEG_STATE_SETTLE_TIME  10000
4203
4204 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4205                                    struct tg3_fiber_aneginfo *ap)
4206 {
4207         u16 flowctrl;
4208         unsigned long delta;
4209         u32 rx_cfg_reg;
4210         int ret;
4211
4212         if (ap->state == ANEG_STATE_UNKNOWN) {
4213                 ap->rxconfig = 0;
4214                 ap->link_time = 0;
4215                 ap->cur_time = 0;
4216                 ap->ability_match_cfg = 0;
4217                 ap->ability_match_count = 0;
4218                 ap->ability_match = 0;
4219                 ap->idle_match = 0;
4220                 ap->ack_match = 0;
4221         }
4222         ap->cur_time++;
4223
4224         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4225                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4226
4227                 if (rx_cfg_reg != ap->ability_match_cfg) {
4228                         ap->ability_match_cfg = rx_cfg_reg;
4229                         ap->ability_match = 0;
4230                         ap->ability_match_count = 0;
4231                 } else {
4232                         if (++ap->ability_match_count > 1) {
4233                                 ap->ability_match = 1;
4234                                 ap->ability_match_cfg = rx_cfg_reg;
4235                         }
4236                 }
4237                 if (rx_cfg_reg & ANEG_CFG_ACK)
4238                         ap->ack_match = 1;
4239                 else
4240                         ap->ack_match = 0;
4241
4242                 ap->idle_match = 0;
4243         } else {
4244                 ap->idle_match = 1;
4245                 ap->ability_match_cfg = 0;
4246                 ap->ability_match_count = 0;
4247                 ap->ability_match = 0;
4248                 ap->ack_match = 0;
4249
4250                 rx_cfg_reg = 0;
4251         }
4252
4253         ap->rxconfig = rx_cfg_reg;
4254         ret = ANEG_OK;
4255
4256         switch (ap->state) {
4257         case ANEG_STATE_UNKNOWN:
4258                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4259                         ap->state = ANEG_STATE_AN_ENABLE;
4260
4261                 /* fallthru */
4262         case ANEG_STATE_AN_ENABLE:
4263                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4264                 if (ap->flags & MR_AN_ENABLE) {
4265                         ap->link_time = 0;
4266                         ap->cur_time = 0;
4267                         ap->ability_match_cfg = 0;
4268                         ap->ability_match_count = 0;
4269                         ap->ability_match = 0;
4270                         ap->idle_match = 0;
4271                         ap->ack_match = 0;
4272
4273                         ap->state = ANEG_STATE_RESTART_INIT;
4274                 } else {
4275                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4276                 }
4277                 break;
4278
4279         case ANEG_STATE_RESTART_INIT:
4280                 ap->link_time = ap->cur_time;
4281                 ap->flags &= ~(MR_NP_LOADED);
4282                 ap->txconfig = 0;
4283                 tw32(MAC_TX_AUTO_NEG, 0);
4284                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4285                 tw32_f(MAC_MODE, tp->mac_mode);
4286                 udelay(40);
4287
4288                 ret = ANEG_TIMER_ENAB;
4289                 ap->state = ANEG_STATE_RESTART;
4290
4291                 /* fallthru */
4292         case ANEG_STATE_RESTART:
4293                 delta = ap->cur_time - ap->link_time;
4294                 if (delta > ANEG_STATE_SETTLE_TIME)
4295                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4296                 else
4297                         ret = ANEG_TIMER_ENAB;
4298                 break;
4299
4300         case ANEG_STATE_DISABLE_LINK_OK:
4301                 ret = ANEG_DONE;
4302                 break;
4303
4304         case ANEG_STATE_ABILITY_DETECT_INIT:
4305                 ap->flags &= ~(MR_TOGGLE_TX);
4306                 ap->txconfig = ANEG_CFG_FD;
4307                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4308                 if (flowctrl & ADVERTISE_1000XPAUSE)
4309                         ap->txconfig |= ANEG_CFG_PS1;
4310                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4311                         ap->txconfig |= ANEG_CFG_PS2;
4312                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4313                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4314                 tw32_f(MAC_MODE, tp->mac_mode);
4315                 udelay(40);
4316
4317                 ap->state = ANEG_STATE_ABILITY_DETECT;
4318                 break;
4319
4320         case ANEG_STATE_ABILITY_DETECT:
4321                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4322                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4323                 break;
4324
4325         case ANEG_STATE_ACK_DETECT_INIT:
4326                 ap->txconfig |= ANEG_CFG_ACK;
4327                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4328                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4329                 tw32_f(MAC_MODE, tp->mac_mode);
4330                 udelay(40);
4331
4332                 ap->state = ANEG_STATE_ACK_DETECT;
4333
4334                 /* fallthru */
4335         case ANEG_STATE_ACK_DETECT:
4336                 if (ap->ack_match != 0) {
4337                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4338                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4339                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4340                         } else {
4341                                 ap->state = ANEG_STATE_AN_ENABLE;
4342                         }
4343                 } else if (ap->ability_match != 0 &&
4344                            ap->rxconfig == 0) {
4345                         ap->state = ANEG_STATE_AN_ENABLE;
4346                 }
4347                 break;
4348
4349         case ANEG_STATE_COMPLETE_ACK_INIT:
4350                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4351                         ret = ANEG_FAILED;
4352                         break;
4353                 }
4354                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4355                                MR_LP_ADV_HALF_DUPLEX |
4356                                MR_LP_ADV_SYM_PAUSE |
4357                                MR_LP_ADV_ASYM_PAUSE |
4358                                MR_LP_ADV_REMOTE_FAULT1 |
4359                                MR_LP_ADV_REMOTE_FAULT2 |
4360                                MR_LP_ADV_NEXT_PAGE |
4361                                MR_TOGGLE_RX |
4362                                MR_NP_RX);
4363                 if (ap->rxconfig & ANEG_CFG_FD)
4364                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4365                 if (ap->rxconfig & ANEG_CFG_HD)
4366                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4367                 if (ap->rxconfig & ANEG_CFG_PS1)
4368                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4369                 if (ap->rxconfig & ANEG_CFG_PS2)
4370                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4371                 if (ap->rxconfig & ANEG_CFG_RF1)
4372                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4373                 if (ap->rxconfig & ANEG_CFG_RF2)
4374                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4375                 if (ap->rxconfig & ANEG_CFG_NP)
4376                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4377
4378                 ap->link_time = ap->cur_time;
4379
4380                 ap->flags ^= (MR_TOGGLE_TX);
4381                 if (ap->rxconfig & 0x0008)
4382                         ap->flags |= MR_TOGGLE_RX;
4383                 if (ap->rxconfig & ANEG_CFG_NP)
4384                         ap->flags |= MR_NP_RX;
4385                 ap->flags |= MR_PAGE_RX;
4386
4387                 ap->state = ANEG_STATE_COMPLETE_ACK;
4388                 ret = ANEG_TIMER_ENAB;
4389                 break;
4390
4391         case ANEG_STATE_COMPLETE_ACK:
4392                 if (ap->ability_match != 0 &&
4393                     ap->rxconfig == 0) {
4394                         ap->state = ANEG_STATE_AN_ENABLE;
4395                         break;
4396                 }
4397                 delta = ap->cur_time - ap->link_time;
4398                 if (delta > ANEG_STATE_SETTLE_TIME) {
4399                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4400                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4401                         } else {
4402                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4403                                     !(ap->flags & MR_NP_RX)) {
4404                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4405                                 } else {
4406                                         ret = ANEG_FAILED;
4407                                 }
4408                         }
4409                 }
4410                 break;
4411
4412         case ANEG_STATE_IDLE_DETECT_INIT:
4413                 ap->link_time = ap->cur_time;
4414                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4415                 tw32_f(MAC_MODE, tp->mac_mode);
4416                 udelay(40);
4417
4418                 ap->state = ANEG_STATE_IDLE_DETECT;
4419                 ret = ANEG_TIMER_ENAB;
4420                 break;
4421
4422         case ANEG_STATE_IDLE_DETECT:
4423                 if (ap->ability_match != 0 &&
4424                     ap->rxconfig == 0) {
4425                         ap->state = ANEG_STATE_AN_ENABLE;
4426                         break;
4427                 }
4428                 delta = ap->cur_time - ap->link_time;
4429                 if (delta > ANEG_STATE_SETTLE_TIME) {
4430                         /* XXX another gem from the Broadcom driver :( */
4431                         ap->state = ANEG_STATE_LINK_OK;
4432                 }
4433                 break;
4434
4435         case ANEG_STATE_LINK_OK:
4436                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4437                 ret = ANEG_DONE;
4438                 break;
4439
4440         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4441                 /* ??? unimplemented */
4442                 break;
4443
4444         case ANEG_STATE_NEXT_PAGE_WAIT:
4445                 /* ??? unimplemented */
4446                 break;
4447
4448         default:
4449                 ret = ANEG_FAILED;
4450                 break;
4451         }
4452
4453         return ret;
4454 }
4455
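/* Drive the software autoneg state machine to completion, ticking it
 * roughly once per microsecond for up to ~195 ms.  On success, returns 1
 * and reports the transmitted config word and the resolved link flags
 * through @txflags and @rxflags.
 */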
4456 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4457 {
4458         int res = 0;
4459         struct tg3_fiber_aneginfo aninfo;
4460         int status = ANEG_FAILED;
4461         unsigned int tick;
4462         u32 tmp;
4463
4464         tw32_f(MAC_TX_AUTO_NEG, 0);
4465
4466         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4467         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4468         udelay(40);
4469
4470         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4471         udelay(40);
4472
4473         memset(&aninfo, 0, sizeof(aninfo));
4474         aninfo.flags |= MR_AN_ENABLE;
4475         aninfo.state = ANEG_STATE_UNKNOWN;
4476         aninfo.cur_time = 0;
4477         tick = 0;
4478         while (++tick < 195000) {
4479                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4480                 if (status == ANEG_DONE || status == ANEG_FAILED)
4481                         break;
4482
4483                 udelay(1);
4484         }
4485
4486         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4487         tw32_f(MAC_MODE, tp->mac_mode);
4488         udelay(40);
4489
4490         *txflags = aninfo.txconfig;
4491         *rxflags = aninfo.flags;
4492
4493         if (status == ANEG_DONE &&
4494             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4495                              MR_LP_ADV_FULL_DUPLEX)))
4496                 res = 1;
4497
4498         return res;
4499 }
4500
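/* One-time bring-up of the BCM8002 SerDes PHY: software reset followed
 * by PLL lock range, auto-lock/comdet and POR sequencing through the
 * PHY's indirect configuration registers.
 */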
4501 static void tg3_init_bcm8002(struct tg3 *tp)
4502 {
4503         u32 mac_status = tr32(MAC_STATUS);
4504         int i;
4505
4506         /* Reset when initializing for the first time or when we have a link. */
4507         if (tg3_flag(tp, INIT_COMPLETE) &&
4508             !(mac_status & MAC_STATUS_PCS_SYNCED))
4509                 return;
4510
4511         /* Set PLL lock range. */
4512         tg3_writephy(tp, 0x16, 0x8007);
4513
4514         /* SW reset */
4515         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4516
4517         /* Wait for reset to complete. */
4518         /* XXX schedule_timeout() ... */
4519         for (i = 0; i < 500; i++)
4520                 udelay(10);
4521
4522         /* Config mode; select PMA/Ch 1 regs. */
4523         tg3_writephy(tp, 0x10, 0x8411);
4524
4525         /* Enable auto-lock and comdet, select txclk for tx. */
4526         tg3_writephy(tp, 0x11, 0x0a10);
4527
4528         tg3_writephy(tp, 0x18, 0x00a0);
4529         tg3_writephy(tp, 0x16, 0x41ff);
4530
4531         /* Assert and deassert POR. */
4532         tg3_writephy(tp, 0x13, 0x0400);
4533         udelay(40);
4534         tg3_writephy(tp, 0x13, 0x0000);
4535
4536         tg3_writephy(tp, 0x11, 0x0a50);
4537         udelay(40);
4538         tg3_writephy(tp, 0x11, 0x0a10);
4539
4540         /* Wait for signal to stabilize */
4541         /* XXX schedule_timeout() ... */
4542         for (i = 0; i < 15000; i++)
4543                 udelay(10);
4544
4545         /* Deselect the channel register so we can read the PHYID
4546          * later.
4547          */
4548         tg3_writephy(tp, 0x10, 0x8011);
4549 }
4550
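/* Fiber link setup using the hardware autoneg engine (SG DIG block),
 * with a fallback to parallel detection when the partner sends no
 * config code words.  Returns nonzero if the link should be up.
 */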
4551 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4552 {
4553         u16 flowctrl;
4554         u32 sg_dig_ctrl, sg_dig_status;
4555         u32 serdes_cfg, expected_sg_dig_ctrl;
4556         int workaround, port_a;
4557         int current_link_up;
4558
4559         serdes_cfg = 0;
4560         expected_sg_dig_ctrl = 0;
4561         workaround = 0;
4562         port_a = 1;
4563         current_link_up = 0;
4564
4565         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4566             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4567                 workaround = 1;
4568                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4569                         port_a = 0;
4570
4571                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4572                 /* preserve bits 20-23 for voltage regulator */
4573                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574         }
4575
4576         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4577
4578         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4579                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4580                         if (workaround) {
4581                                 u32 val = serdes_cfg;
4582
4583                                 if (port_a)
4584                                         val |= 0xc010000;
4585                                 else
4586                                         val |= 0x4010000;
4587                                 tw32_f(MAC_SERDES_CFG, val);
4588                         }
4589
4590                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4591                 }
4592                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4593                         tg3_setup_flow_control(tp, 0, 0);
4594                         current_link_up = 1;
4595                 }
4596                 goto out;
4597         }
4598
4599         /* Want auto-negotiation.  */
4600         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4601
4602         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4603         if (flowctrl & ADVERTISE_1000XPAUSE)
4604                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4605         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4606                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4607
4608         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4609                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4610                     tp->serdes_counter &&
4611                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4612                                     MAC_STATUS_RCVD_CFG)) ==
4613                      MAC_STATUS_PCS_SYNCED)) {
4614                         tp->serdes_counter--;
4615                         current_link_up = 1;
4616                         goto out;
4617                 }
4618 restart_autoneg:
4619                 if (workaround)
4620                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4621                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4622                 udelay(5);
4623                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4624
4625                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4626                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4627         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4628                                  MAC_STATUS_SIGNAL_DET)) {
4629                 sg_dig_status = tr32(SG_DIG_STATUS);
4630                 mac_status = tr32(MAC_STATUS);
4631
4632                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4633                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4634                         u32 local_adv = 0, remote_adv = 0;
4635
4636                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4637                                 local_adv |= ADVERTISE_1000XPAUSE;
4638                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4639                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4640
4641                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4642                                 remote_adv |= LPA_1000XPAUSE;
4643                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4644                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4645
4646                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4647                         current_link_up = 1;
4648                         tp->serdes_counter = 0;
4649                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4650                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4651                         if (tp->serdes_counter)
4652                                 tp->serdes_counter--;
4653                         else {
4654                                 if (workaround) {
4655                                         u32 val = serdes_cfg;
4656
4657                                         if (port_a)
4658                                                 val |= 0xc010000;
4659                                         else
4660                                                 val |= 0x4010000;
4661
4662                                         tw32_f(MAC_SERDES_CFG, val);
4663                                 }
4664
4665                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4666                                 udelay(40);
4667
4668                                 /* Link parallel detection: the link is up
4669                                  * only if we have PCS_SYNC and are not
4670                                  * receiving config code words. */
4671                                 mac_status = tr32(MAC_STATUS);
4672                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4673                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4674                                         tg3_setup_flow_control(tp, 0, 0);
4675                                         current_link_up = 1;
4676                                         tp->phy_flags |=
4677                                                 TG3_PHYFLG_PARALLEL_DETECT;
4678                                         tp->serdes_counter =
4679                                                 SERDES_PARALLEL_DET_TIMEOUT;
4680                                 } else
4681                                         goto restart_autoneg;
4682                         }
4683                 }
4684         } else {
4685                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4686                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687         }
4688
4689 out:
4690         return current_link_up;
4691 }
4692
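/* Fiber link setup without the hardware autoneg engine: run the
 * software state machine when autoneg is enabled, otherwise force a
 * 1000FD link.  Returns nonzero if the link should be up.
 */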
4693 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4694 {
4695         int current_link_up = 0;
4696
4697         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4698                 goto out;
4699
4700         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4701                 u32 txflags, rxflags;
4702                 int i;
4703
4704                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4705                         u32 local_adv = 0, remote_adv = 0;
4706
4707                         if (txflags & ANEG_CFG_PS1)
4708                                 local_adv |= ADVERTISE_1000XPAUSE;
4709                         if (txflags & ANEG_CFG_PS2)
4710                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4711
4712                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4713                                 remote_adv |= LPA_1000XPAUSE;
4714                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4715                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4716
4717                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4718
4719                         current_link_up = 1;
4720                 }
4721                 for (i = 0; i < 30; i++) {
4722                         udelay(20);
4723                         tw32_f(MAC_STATUS,
4724                                (MAC_STATUS_SYNC_CHANGED |
4725                                 MAC_STATUS_CFG_CHANGED));
4726                         udelay(40);
4727                         if ((tr32(MAC_STATUS) &
4728                              (MAC_STATUS_SYNC_CHANGED |
4729                               MAC_STATUS_CFG_CHANGED)) == 0)
4730                                 break;
4731                 }
4732
4733                 mac_status = tr32(MAC_STATUS);
4734                 if (current_link_up == 0 &&
4735                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4736                     !(mac_status & MAC_STATUS_RCVD_CFG))
4737                         current_link_up = 1;
4738         } else {
4739                 tg3_setup_flow_control(tp, 0, 0);
4740
4741                 /* Forcing 1000FD link up. */
4742                 current_link_up = 1;
4743
4744                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4745                 udelay(40);
4746
4747                 tw32_f(MAC_MODE, tp->mac_mode);
4748                 udelay(40);
4749         }
4750
4751 out:
4752         return current_link_up;
4753 }
4754
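/* Top-level link setup for TBI (fiber) ports.  Bails out early when
 * nothing relevant changed; otherwise redoes autoneg by hardware or by
 * hand and updates carrier state, flow control and the link LED.
 */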
4755 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4756 {
4757         u32 orig_pause_cfg;
4758         u16 orig_active_speed;
4759         u8 orig_active_duplex;
4760         u32 mac_status;
4761         int current_link_up;
4762         int i;
4763
4764         orig_pause_cfg = tp->link_config.active_flowctrl;
4765         orig_active_speed = tp->link_config.active_speed;
4766         orig_active_duplex = tp->link_config.active_duplex;
4767
4768         if (!tg3_flag(tp, HW_AUTONEG) &&
4769             netif_carrier_ok(tp->dev) &&
4770             tg3_flag(tp, INIT_COMPLETE)) {
4771                 mac_status = tr32(MAC_STATUS);
4772                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4773                                MAC_STATUS_SIGNAL_DET |
4774                                MAC_STATUS_CFG_CHANGED |
4775                                MAC_STATUS_RCVD_CFG);
4776                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4777                                    MAC_STATUS_SIGNAL_DET)) {
4778                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4779                                             MAC_STATUS_CFG_CHANGED));
4780                         return 0;
4781                 }
4782         }
4783
4784         tw32_f(MAC_TX_AUTO_NEG, 0);
4785
4786         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4787         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4788         tw32_f(MAC_MODE, tp->mac_mode);
4789         udelay(40);
4790
4791         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4792                 tg3_init_bcm8002(tp);
4793
4794         /* Enable link change events even when polling the serdes.  */
4795         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4796         udelay(40);
4797
4798         current_link_up = 0;
4799         mac_status = tr32(MAC_STATUS);
4800
4801         if (tg3_flag(tp, HW_AUTONEG))
4802                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4803         else
4804                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4805
4806         tp->napi[0].hw_status->status =
4807                 (SD_STATUS_UPDATED |
4808                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4809
4810         for (i = 0; i < 100; i++) {
4811                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4812                                     MAC_STATUS_CFG_CHANGED));
4813                 udelay(5);
4814                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4815                                          MAC_STATUS_CFG_CHANGED |
4816                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4817                         break;
4818         }
4819
4820         mac_status = tr32(MAC_STATUS);
4821         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4822                 current_link_up = 0;
4823                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4824                     tp->serdes_counter == 0) {
4825                         tw32_f(MAC_MODE, (tp->mac_mode |
4826                                           MAC_MODE_SEND_CONFIGS));
4827                         udelay(1);
4828                         tw32_f(MAC_MODE, tp->mac_mode);
4829                 }
4830         }
4831
4832         if (current_link_up == 1) {
4833                 tp->link_config.active_speed = SPEED_1000;
4834                 tp->link_config.active_duplex = DUPLEX_FULL;
4835                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4836                                     LED_CTRL_LNKLED_OVERRIDE |
4837                                     LED_CTRL_1000MBPS_ON));
4838         } else {
4839                 tp->link_config.active_speed = SPEED_INVALID;
4840                 tp->link_config.active_duplex = DUPLEX_INVALID;
4841                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4842                                     LED_CTRL_LNKLED_OVERRIDE |
4843                                     LED_CTRL_TRAFFIC_OVERRIDE));
4844         }
4845
4846         if (current_link_up != netif_carrier_ok(tp->dev)) {
4847                 if (current_link_up)
4848                         netif_carrier_on(tp->dev);
4849                 else
4850                         netif_carrier_off(tp->dev);
4851                 tg3_link_report(tp);
4852         } else {
4853                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4854                 if (orig_pause_cfg != now_pause_cfg ||
4855                     orig_active_speed != tp->link_config.active_speed ||
4856                     orig_active_duplex != tp->link_config.active_duplex)
4857                         tg3_link_report(tp);
4858         }
4859
4860         return 0;
4861 }
4862
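/* Link setup for SerDes ports driven through an MII-style register set
 * (5714S class devices): mirrors the copper path, but advertises the
 * 1000BASE-X ability bits and handles parallel detection.
 */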
4863 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4864 {
4865         int current_link_up, err = 0;
4866         u32 bmsr, bmcr;
4867         u16 current_speed;
4868         u8 current_duplex;
4869         u32 local_adv, remote_adv;
4870
4871         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4872         tw32_f(MAC_MODE, tp->mac_mode);
4873         udelay(40);
4874
4875         tw32(MAC_EVENT, 0);
4876
4877         tw32_f(MAC_STATUS,
4878              (MAC_STATUS_SYNC_CHANGED |
4879               MAC_STATUS_CFG_CHANGED |
4880               MAC_STATUS_MI_COMPLETION |
4881               MAC_STATUS_LNKSTATE_CHANGED));
4882         udelay(40);
4883
4884         if (force_reset)
4885                 tg3_phy_reset(tp);
4886
4887         current_link_up = 0;
4888         current_speed = SPEED_INVALID;
4889         current_duplex = DUPLEX_INVALID;
4890
4891         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4892         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4894                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4895                         bmsr |= BMSR_LSTATUS;
4896                 else
4897                         bmsr &= ~BMSR_LSTATUS;
4898         }
4899
4900         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4901
4902         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4903             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4904                 /* do nothing, just check for link up at the end */
4905         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4906                 u32 adv, new_adv;
4907
4908                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4909                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4910                                   ADVERTISE_1000XPAUSE |
4911                                   ADVERTISE_1000XPSE_ASYM |
4912                                   ADVERTISE_SLCT);
4913
4914                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4915
4916                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4917                         new_adv |= ADVERTISE_1000XHALF;
4918                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4919                         new_adv |= ADVERTISE_1000XFULL;
4920
4921                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4922                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4923                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4924                         tg3_writephy(tp, MII_BMCR, bmcr);
4925
4926                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4927                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4928                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4929
4930                         return err;
4931                 }
4932         } else {
4933                 u32 new_bmcr;
4934
4935                 bmcr &= ~BMCR_SPEED1000;
4936                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4937
4938                 if (tp->link_config.duplex == DUPLEX_FULL)
4939                         new_bmcr |= BMCR_FULLDPLX;
4940
4941                 if (new_bmcr != bmcr) {
4942                         /* BMCR_SPEED1000 is a reserved bit that needs
4943                          * to be set on write.
4944                          */
4945                         new_bmcr |= BMCR_SPEED1000;
4946
4947                         /* Force a linkdown */
4948                         if (netif_carrier_ok(tp->dev)) {
4949                                 u32 adv;
4950
4951                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4952                                 adv &= ~(ADVERTISE_1000XFULL |
4953                                          ADVERTISE_1000XHALF |
4954                                          ADVERTISE_SLCT);
4955                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4956                                 tg3_writephy(tp, MII_BMCR, bmcr |
4957                                                            BMCR_ANRESTART |
4958                                                            BMCR_ANENABLE);
4959                                 udelay(10);
4960                                 netif_carrier_off(tp->dev);
4961                         }
4962                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4963                         bmcr = new_bmcr;
4964                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4965                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4966                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4967                             ASIC_REV_5714) {
4968                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4969                                         bmsr |= BMSR_LSTATUS;
4970                                 else
4971                                         bmsr &= ~BMSR_LSTATUS;
4972                         }
4973                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4974                 }
4975         }
4976
4977         if (bmsr & BMSR_LSTATUS) {
4978                 current_speed = SPEED_1000;
4979                 current_link_up = 1;
4980                 if (bmcr & BMCR_FULLDPLX)
4981                         current_duplex = DUPLEX_FULL;
4982                 else
4983                         current_duplex = DUPLEX_HALF;
4984
4985                 local_adv = 0;
4986                 remote_adv = 0;
4987
4988                 if (bmcr & BMCR_ANENABLE) {
4989                         u32 common;
4990
4991                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4992                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4993                         common = local_adv & remote_adv;
4994                         if (common & (ADVERTISE_1000XHALF |
4995                                       ADVERTISE_1000XFULL)) {
4996                                 if (common & ADVERTISE_1000XFULL)
4997                                         current_duplex = DUPLEX_FULL;
4998                                 else
4999                                         current_duplex = DUPLEX_HALF;
5000                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5001                                 /* Link is up via parallel detect */
5002                         } else {
5003                                 current_link_up = 0;
5004                         }
5005                 }
5006         }
5007
5008         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5009                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5010
5011         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012         if (tp->link_config.active_duplex == DUPLEX_HALF)
5013                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014
5015         tw32_f(MAC_MODE, tp->mac_mode);
5016         udelay(40);
5017
5018         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5019
5020         tp->link_config.active_speed = current_speed;
5021         tp->link_config.active_duplex = current_duplex;
5022
5023         if (current_link_up != netif_carrier_ok(tp->dev)) {
5024                 if (current_link_up)
5025                         netif_carrier_on(tp->dev);
5026                 else {
5027                         netif_carrier_off(tp->dev);
5028                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5029                 }
5030                 tg3_link_report(tp);
5031         }
5032         return err;
5033 }
5034
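/* Called periodically once tp->serdes_counter has counted down.  If
 * autoneg produced no link but we see signal detect and no incoming
 * config code words, force the link up by parallel detection; if config
 * code words show up later, hand control back to autoneg.
 */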
5035 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5036 {
5037         if (tp->serdes_counter) {
5038                 /* Give autoneg time to complete. */
5039                 tp->serdes_counter--;
5040                 return;
5041         }
5042
5043         if (!netif_carrier_ok(tp->dev) &&
5044             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5045                 u32 bmcr;
5046
5047                 tg3_readphy(tp, MII_BMCR, &bmcr);
5048                 if (bmcr & BMCR_ANENABLE) {
5049                         u32 phy1, phy2;
5050
5051                         /* Select shadow register 0x1f */
5052                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5053                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5054
5055                         /* Select expansion interrupt status register */
5056                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5057                                          MII_TG3_DSP_EXP1_INT_STAT);
5058                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5060
5061                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5062                                 /* We have signal detect and not receiving
5063                                  * config code words, link is up by parallel
5064                                  * detection.
5065                                  */
5066
5067                                 bmcr &= ~BMCR_ANENABLE;
5068                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5069                                 tg3_writephy(tp, MII_BMCR, bmcr);
5070                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071                         }
5072                 }
5073         } else if (netif_carrier_ok(tp->dev) &&
5074                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5075                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5076                 u32 phy2;
5077
5078                 /* Select expansion interrupt status register */
5079                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5080                                  MII_TG3_DSP_EXP1_INT_STAT);
5081                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5082                 if (phy2 & 0x20) {
5083                         u32 bmcr;
5084
5085                         /* Config code words received, turn on autoneg. */
5086                         tg3_readphy(tp, MII_BMCR, &bmcr);
5087                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5088
5089                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5090
5091                 }
5092         }
5093 }
5094
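/* Dispatch link setup to the copper, fiber or fiber-MII handler, then
 * apply post-link fixups: the 5784_AX clock prescaler, tx IPG/slot-time
 * lengths, statistics coalescing, and the ASPM L1 threshold workaround.
 */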
5095 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5096 {
5097         u32 val;
5098         int err;
5099
5100         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5101                 err = tg3_setup_fiber_phy(tp, force_reset);
5102         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5103                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5104         else
5105                 err = tg3_setup_copper_phy(tp, force_reset);
5106
5107         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5108                 u32 scale;
5109
5110                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5111                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5112                         scale = 65;
5113                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5114                         scale = 6;
5115                 else
5116                         scale = 12;
5117
5118                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5119                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5120                 tw32(GRC_MISC_CFG, val);
5121         }
5122
5123         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5124               (6 << TX_LENGTHS_IPG_SHIFT);
5125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5126                 val |= tr32(MAC_TX_LENGTHS) &
5127                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5128                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5129
5130         if (tp->link_config.active_speed == SPEED_1000 &&
5131             tp->link_config.active_duplex == DUPLEX_HALF)
5132                 tw32(MAC_TX_LENGTHS, val |
5133                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5134         else
5135                 tw32(MAC_TX_LENGTHS, val |
5136                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5137
5138         if (!tg3_flag(tp, 5705_PLUS)) {
5139                 if (netif_carrier_ok(tp->dev)) {
5140                         tw32(HOSTCC_STAT_COAL_TICKS,
5141                              tp->coal.stats_block_coalesce_usecs);
5142                 } else {
5143                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5144                 }
5145         }
5146
5147         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5148                 val = tr32(PCIE_PWR_MGMT_THRESH);
5149                 if (!netif_carrier_ok(tp->dev))
5150                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5151                               tp->pwrmgmt_thresh;
5152                 else
5153                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5154                 tw32(PCIE_PWR_MGMT_THRESH, val);
5155         }
5156
5157         return err;
5158 }
5159
5160 static inline int tg3_irq_sync(struct tg3 *tp)
5161 {
5162         return tp->irq_sync;
5163 }
5164
5165 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5166 {
5167         int i;
5168
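        /* Offset the destination so that each register lands at its own
         * register offset within the dump buffer.
         */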
5169         dst = (u32 *)((u8 *)dst + off);
5170         for (i = 0; i < len; i += sizeof(u32))
5171                 *dst++ = tr32(off + i);
5172 }
5173
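/* Gather the register blocks of interest for a legacy (non-PCI-Express)
 * register dump; the offsets and lengths follow the device register map.
 */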
5174 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5175 {
5176         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5177         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5178         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5179         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5180         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5181         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5182         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5183         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5184         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5185         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5186         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5187         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5188         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5189         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5190         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5191         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5192         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5193         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5194         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5195
5196         if (tg3_flag(tp, SUPPORT_MSIX))
5197                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5198
5199         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5200         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5201         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5202         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5203         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5204         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5205         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5206         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5207
5208         if (!tg3_flag(tp, 5705_PLUS)) {
5209                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5210                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5211                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212         }
5213
5214         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5215         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5216         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5217         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5218         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5219
5220         if (tg3_flag(tp, NVRAM))
5221                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 }
5223
5224 static void tg3_dump_state(struct tg3 *tp)
5225 {
5226         int i;
5227         u32 *regs;
5228
5229         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5230         if (!regs) {
5231                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5232                 return;
5233         }
5234
5235         if (tg3_flag(tp, PCI_EXPRESS)) {
5236                 /* Read up to but not including private PCI registers */
5237                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5238                         regs[i / sizeof(u32)] = tr32(i);
5239         } else
5240                 tg3_dump_legacy_regs(tp, regs);
5241
5242         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5243                 if (!regs[i + 0] && !regs[i + 1] &&
5244                     !regs[i + 2] && !regs[i + 3])
5245                         continue;
5246
5247                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5248                            i * 4,
5249                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5250         }
5251
5252         kfree(regs);
5253
5254         for (i = 0; i < tp->irq_cnt; i++) {
5255                 struct tg3_napi *tnapi = &tp->napi[i];
5256
5257                 /* SW status block */
5258                 netdev_err(tp->dev,
5259                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5260                            i,
5261                            tnapi->hw_status->status,
5262                            tnapi->hw_status->status_tag,
5263                            tnapi->hw_status->rx_jumbo_consumer,
5264                            tnapi->hw_status->rx_consumer,
5265                            tnapi->hw_status->rx_mini_consumer,
5266                            tnapi->hw_status->idx[0].rx_producer,
5267                            tnapi->hw_status->idx[0].tx_consumer);
5268
5269                 netdev_err(tp->dev,
5270                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5271                            i,
5272                            tnapi->last_tag, tnapi->last_irq_tag,
5273                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5274                            tnapi->rx_rcb_ptr,
5275                            tnapi->prodring.rx_std_prod_idx,
5276                            tnapi->prodring.rx_std_cons_idx,
5277                            tnapi->prodring.rx_jmb_prod_idx,
5278                            tnapi->prodring.rx_jmb_cons_idx);
5279         }
5280 }
5281
5282 /* This is called whenever we suspect that the system chipset is re-
5283  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5284  * is bogus tx completions. We try to recover by setting the
5285  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5286  * in the workqueue.
5287  */
5288 static void tg3_tx_recover(struct tg3 *tp)
5289 {
5290         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5291                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5292
5293         netdev_warn(tp->dev,
5294                     "The system may be re-ordering memory-mapped I/O "
5295                     "cycles to the network device, attempting to recover. "
5296                     "Please report the problem to the driver maintainer "
5297                     "and include system chipset information.\n");
5298
5299         spin_lock(&tp->lock);
5300         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5301         spin_unlock(&tp->lock);
5302 }
5303
5304 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5305 {
5306         /* Tell compiler to fetch tx indices from memory. */
5307         barrier();
5308         return tnapi->tx_pending -
5309                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5310 }
5311
5312 /* Tigon3 never reports partial packet sends.  So we do not
5313  * need special logic to handle SKBs that have not had all
5314  * of their frags sent yet, like SunGEM does.
5315  */
5316 static void tg3_tx(struct tg3_napi *tnapi)
5317 {
5318         struct tg3 *tp = tnapi->tp;
5319         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5320         u32 sw_idx = tnapi->tx_cons;
5321         struct netdev_queue *txq;
5322         int index = tnapi - tp->napi;
5323
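        /* With TSS, tp->napi[0] services no tx ring, so tx queue N is
         * handled by tp->napi[N + 1].
         */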
5324         if (tg3_flag(tp, ENABLE_TSS))
5325                 index--;
5326
5327         txq = netdev_get_tx_queue(tp->dev, index);
5328
5329         while (sw_idx != hw_idx) {
5330                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5331                 struct sk_buff *skb = ri->skb;
5332                 int i, tx_bug = 0;
5333
5334                 if (unlikely(skb == NULL)) {
5335                         tg3_tx_recover(tp);
5336                         return;
5337                 }
5338
5339                 pci_unmap_single(tp->pdev,
5340                                  dma_unmap_addr(ri, mapping),
5341                                  skb_headlen(skb),
5342                                  PCI_DMA_TODEVICE);
5343
5344                 ri->skb = NULL;
5345
5346                 while (ri->fragmented) {
5347                         ri->fragmented = false;
5348                         sw_idx = NEXT_TX(sw_idx);
5349                         ri = &tnapi->tx_buffers[sw_idx];
5350                 }
5351
5352                 sw_idx = NEXT_TX(sw_idx);
5353
5354                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5355                         ri = &tnapi->tx_buffers[sw_idx];
5356                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5357                                 tx_bug = 1;
5358
5359                         pci_unmap_page(tp->pdev,
5360                                        dma_unmap_addr(ri, mapping),
5361                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5362                                        PCI_DMA_TODEVICE);
5363
5364                         while (ri->fragmented) {
5365                                 ri->fragmented = false;
5366                                 sw_idx = NEXT_TX(sw_idx);
5367                                 ri = &tnapi->tx_buffers[sw_idx];
5368                         }
5369
5370                         sw_idx = NEXT_TX(sw_idx);
5371                 }
5372
5373                 dev_kfree_skb(skb);
5374
5375                 if (unlikely(tx_bug)) {
5376                         tg3_tx_recover(tp);
5377                         return;
5378                 }
5379         }
5380
5381         tnapi->tx_cons = sw_idx;
5382
5383         /* Need to make the tx_cons update visible to tg3_start_xmit()
5384          * before checking for netif_queue_stopped().  Without the
5385          * memory barrier, there is a small possibility that tg3_start_xmit()
5386          * will miss it and cause the queue to be stopped forever.
5387          */
5388         smp_mb();
5389
5390         if (unlikely(netif_tx_queue_stopped(txq) &&
5391                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5392                 __netif_tx_lock(txq, smp_processor_id());
5393                 if (netif_tx_queue_stopped(txq) &&
5394                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5395                         netif_tx_wake_queue(txq);
5396                 __netif_tx_unlock(txq);
5397         }
5398 }
5399
5400 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5401 {
5402         if (!ri->skb)
5403                 return;
5404
5405         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5406                          map_sz, PCI_DMA_FROMDEVICE);
5407         dev_kfree_skb_any(ri->skb);
5408         ri->skb = NULL;
5409 }
5410
5411 /* Returns size of skb allocated or < 0 on error.
5412  *
5413  * We only need to fill in the address because the other members
5414  * of the RX descriptor are invariant, see tg3_init_rings.
5415  *
5416  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5417  * posting buffers we only dirty the first cache line of the RX
5418  * descriptor (containing the address).  Whereas for the RX status
5419  * buffers the cpu only reads the last cacheline of the RX descriptor
5420  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5421  */
5422 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5423                             u32 opaque_key, u32 dest_idx_unmasked)
5424 {
5425         struct tg3_rx_buffer_desc *desc;
5426         struct ring_info *map;
5427         struct sk_buff *skb;
5428         dma_addr_t mapping;
5429         int skb_size, dest_idx;
5430
5431         switch (opaque_key) {
5432         case RXD_OPAQUE_RING_STD:
5433                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5434                 desc = &tpr->rx_std[dest_idx];
5435                 map = &tpr->rx_std_buffers[dest_idx];
5436                 skb_size = tp->rx_pkt_map_sz;
5437                 break;
5438
5439         case RXD_OPAQUE_RING_JUMBO:
5440                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5441                 desc = &tpr->rx_jmb[dest_idx].std;
5442                 map = &tpr->rx_jmb_buffers[dest_idx];
5443                 skb_size = TG3_RX_JMB_MAP_SZ;
5444                 break;
5445
5446         default:
5447                 return -EINVAL;
5448         }
5449
5450         /* Do not overwrite any of the map or rp information
5451          * until we are sure we can commit to a new buffer.
5452          *
5453          * Callers depend upon this behavior and assume that
5454          * we leave everything unchanged if we fail.
5455          */
5456         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5457         if (skb == NULL)
5458                 return -ENOMEM;
5459
5460         skb_reserve(skb, TG3_RX_OFFSET(tp));
5461
5462         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5463                                  PCI_DMA_FROMDEVICE);
5464         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5465                 dev_kfree_skb(skb);
5466                 return -EIO;
5467         }
5468
5469         map->skb = skb;
5470         dma_unmap_addr_set(map, mapping, mapping);
5471
5472         desc->addr_hi = ((u64)mapping >> 32);
5473         desc->addr_lo = ((u64)mapping & 0xffffffff);
5474
5475         return skb_size;
5476 }
5477
5478 /* We only need to move over in the address because the other
5479  * members of the RX descriptor are invariant.  See notes above
5480  * tg3_alloc_rx_skb for full details.
5481  */
5482 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5483                            struct tg3_rx_prodring_set *dpr,
5484                            u32 opaque_key, int src_idx,
5485                            u32 dest_idx_unmasked)
5486 {
5487         struct tg3 *tp = tnapi->tp;
5488         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5489         struct ring_info *src_map, *dest_map;
5490         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5491         int dest_idx;
5492
5493         switch (opaque_key) {
5494         case RXD_OPAQUE_RING_STD:
5495                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5496                 dest_desc = &dpr->rx_std[dest_idx];
5497                 dest_map = &dpr->rx_std_buffers[dest_idx];
5498                 src_desc = &spr->rx_std[src_idx];
5499                 src_map = &spr->rx_std_buffers[src_idx];
5500                 break;
5501
5502         case RXD_OPAQUE_RING_JUMBO:
5503                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5504                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5505                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5506                 src_desc = &spr->rx_jmb[src_idx].std;
5507                 src_map = &spr->rx_jmb_buffers[src_idx];
5508                 break;
5509
5510         default:
5511                 return;
5512         }
5513
5514         dest_map->skb = src_map->skb;
5515         dma_unmap_addr_set(dest_map, mapping,
5516                            dma_unmap_addr(src_map, mapping));
5517         dest_desc->addr_hi = src_desc->addr_hi;
5518         dest_desc->addr_lo = src_desc->addr_lo;
5519
5520         /* Ensure that the update to the skb happens after the physical
5521          * addresses have been transferred to the new BD location.
5522          */
5523         smp_wmb();
5524
5525         src_map->skb = NULL;
5526 }
5527
5528 /* The RX ring scheme is composed of multiple rings which post fresh
5529  * buffers to the chip, and one special ring the chip uses to report
5530  * status back to the host.
5531  *
5532  * The special ring reports the status of received packets to the
5533  * host.  The chip does not write into the original descriptor the
5534  * RX buffer was obtained from.  The chip simply takes the original
5535  * descriptor as provided by the host, updates the status and length
5536  * field, then writes this into the next status ring entry.
5537  *
5538  * Each ring the host uses to post buffers to the chip is described
5539  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5540  * it is first placed into the on-chip ram.  When the packet's length
5541  * is known, it walks down the TG3_BDINFO entries to select the ring.
5542  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5543  * whose MAXLEN covers the new packet's length is chosen.
5544  *
5545  * The "separate ring for rx status" scheme may sound queer, but it makes
5546  * sense from a cache coherency perspective.  If only the host writes
5547  * to the buffer post rings, and only the chip writes to the rx status
5548  * rings, then cache lines never move beyond shared-modified state.
5549  * If both the host and chip were to write into the same ring, cache line
5550  * eviction could occur since both entities want it in an exclusive state.
5551  */
5552 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5553 {
5554         struct tg3 *tp = tnapi->tp;
5555         u32 work_mask, rx_std_posted = 0;
5556         u32 std_prod_idx, jmb_prod_idx;
5557         u32 sw_idx = tnapi->rx_rcb_ptr;
5558         u16 hw_idx;
5559         int received;
5560         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5561
5562         hw_idx = *(tnapi->rx_rcb_prod_idx);
5563         /*
5564          * We need to order the read of hw_idx and the read of
5565          * the opaque cookie.
5566          */
5567         rmb();
5568         work_mask = 0;
5569         received = 0;
5570         std_prod_idx = tpr->rx_std_prod_idx;
5571         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5572         while (sw_idx != hw_idx && budget > 0) {
5573                 struct ring_info *ri;
5574                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5575                 unsigned int len;
5576                 struct sk_buff *skb;
5577                 dma_addr_t dma_addr;
5578                 u32 opaque_key, desc_idx, *post_ptr;
5579
5580                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5581                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5582                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5583                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5584                         dma_addr = dma_unmap_addr(ri, mapping);
5585                         skb = ri->skb;
5586                         post_ptr = &std_prod_idx;
5587                         rx_std_posted++;
5588                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5589                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5590                         dma_addr = dma_unmap_addr(ri, mapping);
5591                         skb = ri->skb;
5592                         post_ptr = &jmb_prod_idx;
5593                 } else
5594                         goto next_pkt_nopost;
5595
5596                 work_mask |= opaque_key;
5597
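                /* Drop the frame when the descriptor carries error bits,
                 * unless the error field is exactly the (harmless)
                 * odd-nibble-received MII indication.
                 */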
5598                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5599                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5600                 drop_it:
5601                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5602                                        desc_idx, *post_ptr);
5603                 drop_it_no_recycle:
5604                         /* The card keeps track of the other statistics. */
5605                         tp->rx_dropped++;
5606                         goto next_pkt;
5607                 }
5608
5609                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5610                       ETH_FCS_LEN;
5611
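                /* Copybreak: large packets hand the mapped buffer straight up
                 * the stack and a fresh buffer is posted in its place; small
                 * packets are copied into a new skb so the original buffer
                 * can simply be recycled.
                 */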
5612                 if (len > TG3_RX_COPY_THRESH(tp)) {
5613                         int skb_size;
5614
5615                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5616                                                     *post_ptr);
5617                         if (skb_size < 0)
5618                                 goto drop_it;
5619
5620                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5621                                          PCI_DMA_FROMDEVICE);
5622
5623                         /* Ensure that the update to the skb happens
5624                          * after the usage of the old DMA mapping.
5625                          */
5626                         smp_wmb();
5627
5628                         ri->skb = NULL;
5629
5630                         skb_put(skb, len);
5631                 } else {
5632                         struct sk_buff *copy_skb;
5633
5634                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5635                                        desc_idx, *post_ptr);
5636
5637                         copy_skb = netdev_alloc_skb(tp->dev, len +
5638                                                     TG3_RAW_IP_ALIGN);
5639                         if (copy_skb == NULL)
5640                                 goto drop_it_no_recycle;
5641
5642                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5643                         skb_put(copy_skb, len);
5644                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5645                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5646                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5647
5648                         /* We'll reuse the original ring buffer. */
5649                         skb = copy_skb;
5650                 }
5651
5652                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5653                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5654                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5655                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5656                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5657                 else
5658                         skb_checksum_none_assert(skb);
5659
5660                 skb->protocol = eth_type_trans(skb, tp->dev);
5661
5662                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5663                     skb->protocol != htons(ETH_P_8021Q)) {
5664                         dev_kfree_skb(skb);
5665                         goto drop_it_no_recycle;
5666                 }
5667
5668                 if (desc->type_flags & RXD_FLAG_VLAN &&
5669                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5670                         __vlan_hwaccel_put_tag(skb,
5671                                                desc->err_vlan & RXD_VLAN_MASK);
5672
5673                 napi_gro_receive(&tnapi->napi, skb);
5674
5675                 received++;
5676                 budget--;
5677
5678 next_pkt:
5679                 (*post_ptr)++;
5680
5681                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5682                         tpr->rx_std_prod_idx = std_prod_idx &
5683                                                tp->rx_std_ring_mask;
5684                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5685                                      tpr->rx_std_prod_idx);
5686                         work_mask &= ~RXD_OPAQUE_RING_STD;
5687                         rx_std_posted = 0;
5688                 }
5689 next_pkt_nopost:
5690                 sw_idx++;
5691                 sw_idx &= tp->rx_ret_ring_mask;
5692
5693                 /* Refresh hw_idx to see if there is new work */
5694                 if (sw_idx == hw_idx) {
5695                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5696                         rmb();
5697                 }
5698         }
5699
5700         /* ACK the status ring. */
5701         tnapi->rx_rcb_ptr = sw_idx;
5702         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5703
5704         /* Refill RX ring(s). */
5705         if (!tg3_flag(tp, ENABLE_RSS)) {
5706                 if (work_mask & RXD_OPAQUE_RING_STD) {
5707                         tpr->rx_std_prod_idx = std_prod_idx &
5708                                                tp->rx_std_ring_mask;
5709                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5710                                      tpr->rx_std_prod_idx);
5711                 }
5712                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5713                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5714                                                tp->rx_jmb_ring_mask;
5715                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5716                                      tpr->rx_jmb_prod_idx);
5717                 }
5718                 mmiowb();
5719         } else if (work_mask) {
5720                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5721                  * updated before the producer indices can be updated.
5722                  */
5723                 smp_wmb();
5724
5725                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5726                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5727
5728                 if (tnapi != &tp->napi[1])
5729                         napi_schedule(&tp->napi[1].napi);
5730         }
5731
5732         return received;
5733 }
5734
5735 static void tg3_poll_link(struct tg3 *tp)
5736 {
5737         /* handle link change and other phy events */
5738         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5739                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5740
5741                 if (sblk->status & SD_STATUS_LINK_CHG) {
5742                         sblk->status = SD_STATUS_UPDATED |
5743                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5744                         spin_lock(&tp->lock);
5745                         if (tg3_flag(tp, USE_PHYLIB)) {
5746                                 tw32_f(MAC_STATUS,
5747                                      (MAC_STATUS_SYNC_CHANGED |
5748                                       MAC_STATUS_CFG_CHANGED |
5749                                       MAC_STATUS_MI_COMPLETION |
5750                                       MAC_STATUS_LNKSTATE_CHANGED));
5751                                 udelay(40);
5752                         } else
5753                                 tg3_setup_phy(tp, 0);
5754                         spin_unlock(&tp->lock);
5755                 }
5756         }
5757 }
5758
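/* Move recycled buffers from a per-vector source producer ring set (spr)
 * to the destination set (dpr) that the hardware actually consumes,
 * copying both the ring_info bookkeeping and the buffer descriptor
 * addresses.  Returns -ENOSPC when a destination slot is still occupied.
 */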
5759 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5760                                 struct tg3_rx_prodring_set *dpr,
5761                                 struct tg3_rx_prodring_set *spr)
5762 {
5763         u32 si, di, cpycnt, src_prod_idx;
5764         int i, err = 0;
5765
5766         while (1) {
5767                 src_prod_idx = spr->rx_std_prod_idx;
5768
5769                 /* Make sure updates to the rx_std_buffers[] entries and the
5770                  * standard producer index are seen in the correct order.
5771                  */
5772                 smp_rmb();
5773
5774                 if (spr->rx_std_cons_idx == src_prod_idx)
5775                         break;
5776
5777                 if (spr->rx_std_cons_idx < src_prod_idx)
5778                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5779                 else
5780                         cpycnt = tp->rx_std_ring_mask + 1 -
5781                                  spr->rx_std_cons_idx;
5782
5783                 cpycnt = min(cpycnt,
5784                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5785
5786                 si = spr->rx_std_cons_idx;
5787                 di = dpr->rx_std_prod_idx;
5788
5789                 for (i = di; i < di + cpycnt; i++) {
5790                         if (dpr->rx_std_buffers[i].skb) {
5791                                 cpycnt = i - di;
5792                                 err = -ENOSPC;
5793                                 break;
5794                         }
5795                 }
5796
5797                 if (!cpycnt)
5798                         break;
5799
5800                 /* Ensure that updates to the rx_std_buffers ring and the
5801                  * shadowed hardware producer ring from tg3_recycle_skb() are
5802                  * ordered correctly WRT the skb check above.
5803                  */
5804                 smp_rmb();
5805
5806                 memcpy(&dpr->rx_std_buffers[di],
5807                        &spr->rx_std_buffers[si],
5808                        cpycnt * sizeof(struct ring_info));
5809
5810                 for (i = 0; i < cpycnt; i++, di++, si++) {
5811                         struct tg3_rx_buffer_desc *sbd, *dbd;
5812                         sbd = &spr->rx_std[si];
5813                         dbd = &dpr->rx_std[di];
5814                         dbd->addr_hi = sbd->addr_hi;
5815                         dbd->addr_lo = sbd->addr_lo;
5816                 }
5817
5818                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5819                                        tp->rx_std_ring_mask;
5820                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5821                                        tp->rx_std_ring_mask;
5822         }
5823
5824         while (1) {
5825                 src_prod_idx = spr->rx_jmb_prod_idx;
5826
5827                 /* Make sure updates to the rx_jmb_buffers[] entries and
5828                  * the jumbo producer index are seen in the correct order.
5829                  */
5830                 smp_rmb();
5831
5832                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5833                         break;
5834
5835                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5836                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5837                 else
5838                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5839                                  spr->rx_jmb_cons_idx;
5840
5841                 cpycnt = min(cpycnt,
5842                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5843
5844                 si = spr->rx_jmb_cons_idx;
5845                 di = dpr->rx_jmb_prod_idx;
5846
5847                 for (i = di; i < di + cpycnt; i++) {
5848                         if (dpr->rx_jmb_buffers[i].skb) {
5849                                 cpycnt = i - di;
5850                                 err = -ENOSPC;
5851                                 break;
5852                         }
5853                 }
5854
5855                 if (!cpycnt)
5856                         break;
5857
5858                 /* Ensure that updates to the rx_jmb_buffers ring and the
5859                  * shadowed hardware producer ring from tg3_recycle_skb() are
5860                  * ordered correctly WRT the skb check above.
5861                  */
5862                 smp_rmb();
5863
5864                 memcpy(&dpr->rx_jmb_buffers[di],
5865                        &spr->rx_jmb_buffers[si],
5866                        cpycnt * sizeof(struct ring_info));
5867
5868                 for (i = 0; i < cpycnt; i++, di++, si++) {
5869                         struct tg3_rx_buffer_desc *sbd, *dbd;
5870                         sbd = &spr->rx_jmb[si].std;
5871                         dbd = &dpr->rx_jmb[di].std;
5872                         dbd->addr_hi = sbd->addr_hi;
5873                         dbd->addr_lo = sbd->addr_lo;
5874                 }
5875
5876                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5877                                        tp->rx_jmb_ring_mask;
5878                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5879                                        tp->rx_jmb_ring_mask;
5880         }
5881
5882         return err;
5883 }
5884
5885 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5886 {
5887         struct tg3 *tp = tnapi->tp;
5888
5889         /* run TX completion thread */
5890         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5891                 tg3_tx(tnapi);
5892                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5893                         return work_done;
5894         }
5895
5896         /* run RX thread, within the bounds set by NAPI.
5897          * All RX "locking" is done by ensuring outside
5898          * code synchronizes with tg3->napi.poll()
5899          */
5900         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5901                 work_done += tg3_rx(tnapi, budget - work_done);
5902
5903         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5904                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5905                 int i, err = 0;
5906                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5907                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5908
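                /* Vector 1 drains the buffers recycled by every other RSS
                 * vector into napi[0]'s producer ring, the only ring the
                 * hardware consumes; a transfer that failed with -ENOSPC is
                 * retried later by kicking the coalescing engine below.
                 */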
5909                 for (i = 1; i < tp->irq_cnt; i++)
5910                         err |= tg3_rx_prodring_xfer(tp, dpr,
5911                                                     &tp->napi[i].prodring);
5912
5913                 wmb();
5914
5915                 if (std_prod_idx != dpr->rx_std_prod_idx)
5916                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917                                      dpr->rx_std_prod_idx);
5918
5919                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5920                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5921                                      dpr->rx_jmb_prod_idx);
5922
5923                 mmiowb();
5924
5925                 if (err)
5926                         tw32_f(HOSTCC_MODE, tp->coal_now);
5927         }
5928
5929         return work_done;
5930 }
5931
5932 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5933 {
5934         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5935                 schedule_work(&tp->reset_task);
5936 }
5937
5938 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5939 {
5940         cancel_work_sync(&tp->reset_task);
5941         tg3_flag_clear(tp, RESET_TASK_PENDING);
5942 }
5943
5944 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5945 {
5946         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5947         struct tg3 *tp = tnapi->tp;
5948         int work_done = 0;
5949         struct tg3_hw_status *sblk = tnapi->hw_status;
5950
5951         while (1) {
5952                 work_done = tg3_poll_work(tnapi, work_done, budget);
5953
5954                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5955                         goto tx_recovery;
5956
5957                 if (unlikely(work_done >= budget))
5958                         break;
5959
5960                 /* tp->last_tag is used in tg3_int_reenable() below
5961                  * to tell the hw how much work has been processed,
5962                  * so we must read it before checking for more work.
5963                  */
5964                 tnapi->last_tag = sblk->status_tag;
5965                 tnapi->last_irq_tag = tnapi->last_tag;
5966                 rmb();
5967
5968                 /* check for RX/TX work to do */
5969                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5970                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5971                         napi_complete(napi);
5972                         /* Reenable interrupts. */
5973                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5974                         mmiowb();
5975                         break;
5976                 }
5977         }
5978
5979         return work_done;
5980
5981 tx_recovery:
5982         /* work_done is guaranteed to be less than budget. */
5983         napi_complete(napi);
5984         tg3_reset_task_schedule(tp);
5985         return work_done;
5986 }
5987
5988 static void tg3_process_error(struct tg3 *tp)
5989 {
5990         u32 val;
5991         bool real_error = false;
5992
5993         if (tg3_flag(tp, ERROR_PROCESSED))
5994                 return;
5995
5996         /* Check Flow Attention register */
5997         val = tr32(HOSTCC_FLOW_ATTN);
5998         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5999                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6000                 real_error = true;
6001         }
6002
6003         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6004                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6005                 real_error = true;
6006         }
6007
6008         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6009                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6010                 real_error = true;
6011         }
6012
6013         if (!real_error)
6014                 return;
6015
6016         tg3_dump_state(tp);
6017
6018         tg3_flag_set(tp, ERROR_PROCESSED);
6019         tg3_reset_task_schedule(tp);
6020 }
6021
6022 static int tg3_poll(struct napi_struct *napi, int budget)
6023 {
6024         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6025         struct tg3 *tp = tnapi->tp;
6026         int work_done = 0;
6027         struct tg3_hw_status *sblk = tnapi->hw_status;
6028
6029         while (1) {
6030                 if (sblk->status & SD_STATUS_ERROR)
6031                         tg3_process_error(tp);
6032
6033                 tg3_poll_link(tp);
6034
6035                 work_done = tg3_poll_work(tnapi, work_done, budget);
6036
6037                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6038                         goto tx_recovery;
6039
6040                 if (unlikely(work_done >= budget))
6041                         break;
6042
6043                 if (tg3_flag(tp, TAGGED_STATUS)) {
6044                         /* tp->last_tag is used in tg3_int_reenable() below
6045                          * to tell the hw how much work has been processed,
6046                          * so we must read it before checking for more work.
6047                          */
6048                         tnapi->last_tag = sblk->status_tag;
6049                         tnapi->last_irq_tag = tnapi->last_tag;
6050                         rmb();
6051                 } else
6052                         sblk->status &= ~SD_STATUS_UPDATED;
6053
6054                 if (likely(!tg3_has_work(tnapi))) {
6055                         napi_complete(napi);
6056                         tg3_int_reenable(tnapi);
6057                         break;
6058                 }
6059         }
6060
6061         return work_done;
6062
6063 tx_recovery:
6064         /* work_done is guaranteed to be less than budget. */
6065         napi_complete(napi);
6066         tg3_reset_task_schedule(tp);
6067         return work_done;
6068 }
6069
6070 static void tg3_napi_disable(struct tg3 *tp)
6071 {
6072         int i;
6073
6074         for (i = tp->irq_cnt - 1; i >= 0; i--)
6075                 napi_disable(&tp->napi[i].napi);
6076 }
6077
6078 static void tg3_napi_enable(struct tg3 *tp)
6079 {
6080         int i;
6081
6082         for (i = 0; i < tp->irq_cnt; i++)
6083                 napi_enable(&tp->napi[i].napi);
6084 }
6085
6086 static void tg3_napi_init(struct tg3 *tp)
6087 {
6088         int i;
6089
6090         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6091         for (i = 1; i < tp->irq_cnt; i++)
6092                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6093 }
6094
6095 static void tg3_napi_fini(struct tg3 *tp)
6096 {
6097         int i;
6098
6099         for (i = 0; i < tp->irq_cnt; i++)
6100                 netif_napi_del(&tp->napi[i].napi);
6101 }
6102
6103 static inline void tg3_netif_stop(struct tg3 *tp)
6104 {
6105         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6106         tg3_napi_disable(tp);
6107         netif_tx_disable(tp->dev);
6108 }
6109
6110 static inline void tg3_netif_start(struct tg3 *tp)
6111 {
6112         /* NOTE: unconditional netif_tx_wake_all_queues is only
6113          * appropriate so long as all callers are assured to
6114          * have free tx slots (such as after tg3_init_hw)
6115          */
6116         netif_tx_wake_all_queues(tp->dev);
6117
6118         tg3_napi_enable(tp);
6119         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6120         tg3_enable_ints(tp);
6121 }
6122
6123 static void tg3_irq_quiesce(struct tg3 *tp)
6124 {
6125         int i;
6126
6127         BUG_ON(tp->irq_sync);
6128
6129         tp->irq_sync = 1;
6130         smp_mb();
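        /* Make the irq_sync store visible before waiting: interrupt
         * handlers that observe it via tg3_irq_sync() will decline to
         * schedule NAPI.
         */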
6131
6132         for (i = 0; i < tp->irq_cnt; i++)
6133                 synchronize_irq(tp->napi[i].irq_vec);
6134 }
6135
6136 /* Fully shut down all tg3 driver activity elsewhere in the system.
6137  * If irq_sync is non-zero, we must also synchronize with the IRQ
6138  * handlers.  Most of the time this is not necessary, except when
6139  * shutting down the device.
6140  */
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6142 {
6143         spin_lock_bh(&tp->lock);
6144         if (irq_sync)
6145                 tg3_irq_quiesce(tp);
6146 }
6147
6148 static inline void tg3_full_unlock(struct tg3 *tp)
6149 {
6150         spin_unlock_bh(&tp->lock);
6151 }
6152
6153 /* One-shot MSI handler - Chip automatically disables interrupt
6154  * after sending MSI so driver doesn't have to do it.
6155  */
6156 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6157 {
6158         struct tg3_napi *tnapi = dev_id;
6159         struct tg3 *tp = tnapi->tp;
6160
6161         prefetch(tnapi->hw_status);
6162         if (tnapi->rx_rcb)
6163                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6164
6165         if (likely(!tg3_irq_sync(tp)))
6166                 napi_schedule(&tnapi->napi);
6167
6168         return IRQ_HANDLED;
6169 }
6170
6171 /* MSI ISR - No need to check for interrupt sharing and no need to
6172  * flush status block and interrupt mailbox. PCI ordering rules
6173  * guarantee that MSI will arrive after the status block.
6174  */
6175 static irqreturn_t tg3_msi(int irq, void *dev_id)
6176 {
6177         struct tg3_napi *tnapi = dev_id;
6178         struct tg3 *tp = tnapi->tp;
6179
6180         prefetch(tnapi->hw_status);
6181         if (tnapi->rx_rcb)
6182                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6183         /*
6184          * Writing any value to intr-mbox-0 clears PCI INTA# and
6185          * chip-internal interrupt pending events.
6186          * Writing non-zero to intr-mbox-0 additionally tells the
6187          * NIC to stop sending us irqs, engaging "in-intr-handler"
6188          * event coalescing.
6189          */
6190         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6191         if (likely(!tg3_irq_sync(tp)))
6192                 napi_schedule(&tnapi->napi);
6193
6194         return IRQ_RETVAL(1);
6195 }
6196
6197 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6198 {
6199         struct tg3_napi *tnapi = dev_id;
6200         struct tg3 *tp = tnapi->tp;
6201         struct tg3_hw_status *sblk = tnapi->hw_status;
6202         unsigned int handled = 1;
6203
6204         /* In INTx mode, it is possible for the interrupt to arrive at
6205          * the CPU before the status block that was posted prior to the
6206          * interrupt.  Reading the PCI State register will confirm whether
6207          * the interrupt is ours and will flush the status block.
6208          */
6209         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6210                 if (tg3_flag(tp, CHIP_RESETTING) ||
6211                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6212                         handled = 0;
6213                         goto out;
6214                 }
6215         }
6216
6217         /*
6218          * Writing any value to intr-mbox-0 clears PCI INTA# and
6219          * chip-internal interrupt pending events.
6220          * Writing non-zero to intr-mbox-0 additionally tells the
6221          * NIC to stop sending us irqs, engaging "in-intr-handler"
6222          * event coalescing.
6223          *
6224          * Flush the mailbox to de-assert the IRQ immediately to prevent
6225          * spurious interrupts.  The flush impacts performance but
6226          * excessive spurious interrupts can be worse in some cases.
6227          */
6228         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6229         if (tg3_irq_sync(tp))
6230                 goto out;
6231         sblk->status &= ~SD_STATUS_UPDATED;
6232         if (likely(tg3_has_work(tnapi))) {
6233                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6234                 napi_schedule(&tnapi->napi);
6235         } else {
6236                 /* No work, shared interrupt perhaps?  Re-enable
6237                  * interrupts, and flush that PCI write.
6238                  */
6239                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6240                                0x00000000);
6241         }
6242 out:
6243         return IRQ_RETVAL(handled);
6244 }
6245
6246 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6247 {
6248         struct tg3_napi *tnapi = dev_id;
6249         struct tg3 *tp = tnapi->tp;
6250         struct tg3_hw_status *sblk = tnapi->hw_status;
6251         unsigned int handled = 1;
6252
6253         /* In INTx mode, it is possible for the interrupt to arrive at
6254          * the CPU before the status block that was posted prior to the
6255          * interrupt.  Reading the PCI State register will confirm whether
6256          * the interrupt is ours and will flush the status block.
6257          */
6258         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6259                 if (tg3_flag(tp, CHIP_RESETTING) ||
6260                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6261                         handled = 0;
6262                         goto out;
6263                 }
6264         }
6265
6266         /*
6267          * Writing any value to intr-mbox-0 clears PCI INTA# and
6268          * chip-internal interrupt pending events.
6269          * Writing non-zero to intr-mbox-0 additionally tells the
6270          * NIC to stop sending us irqs, engaging "in-intr-handler"
6271          * event coalescing.
6272          *
6273          * Flush the mailbox to de-assert the IRQ immediately to prevent
6274          * spurious interrupts.  The flush impacts performance but
6275          * excessive spurious interrupts can be worse in some cases.
6276          */
6277         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6278
6279         /*
6280          * In a shared interrupt configuration, sometimes other devices'
6281          * interrupts will scream.  We record the current status tag here
6282          * so that the above check can report that the screaming interrupts
6283          * are unhandled.  Eventually they will be silenced.
6284          */
6285         tnapi->last_irq_tag = sblk->status_tag;
6286
6287         if (tg3_irq_sync(tp))
6288                 goto out;
6289
6290         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6291
6292         napi_schedule(&tnapi->napi);
6293
6294 out:
6295         return IRQ_RETVAL(handled);
6296 }
6297
6298 /* ISR for interrupt test */
6299 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6300 {
6301         struct tg3_napi *tnapi = dev_id;
6302         struct tg3 *tp = tnapi->tp;
6303         struct tg3_hw_status *sblk = tnapi->hw_status;
6304
6305         if ((sblk->status & SD_STATUS_UPDATED) ||
6306             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6307                 tg3_disable_ints(tp);
6308                 return IRQ_RETVAL(1);
6309         }
6310         return IRQ_RETVAL(0);
6311 }
6312
6313 static int tg3_init_hw(struct tg3 *, int);
6314 static int tg3_halt(struct tg3 *, int, int);
6315
6316 /* Restart hardware after configuration changes, self-test, etc.
6317  * Invoked with tp->lock held.
6318  */
6319 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6320         __releases(tp->lock)
6321         __acquires(tp->lock)
6322 {
6323         int err;
6324
6325         err = tg3_init_hw(tp, reset_phy);
6326         if (err) {
6327                 netdev_err(tp->dev,
6328                            "Failed to re-initialize device, aborting\n");
6329                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6330                 tg3_full_unlock(tp);
6331                 del_timer_sync(&tp->timer);
6332                 tp->irq_sync = 0;
6333                 tg3_napi_enable(tp);
6334                 dev_close(tp->dev);
6335                 tg3_full_lock(tp, 0);
6336         }
6337         return err;
6338 }
6339
6340 #ifdef CONFIG_NET_POLL_CONTROLLER
6341 static void tg3_poll_controller(struct net_device *dev)
6342 {
6343         int i;
6344         struct tg3 *tp = netdev_priv(dev);
6345
6346         for (i = 0; i < tp->irq_cnt; i++)
6347                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6348 }
6349 #endif
6350
6351 static void tg3_reset_task(struct work_struct *work)
6352 {
6353         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6354         int err;
6355
6356         tg3_full_lock(tp, 0);
6357
6358         if (!netif_running(tp->dev)) {
6359                 tg3_flag_clear(tp, RESET_TASK_PENDING);
6360                 tg3_full_unlock(tp);
6361                 return;
6362         }
6363
6364         tg3_full_unlock(tp);
6365
6366         tg3_phy_stop(tp);
6367
6368         tg3_netif_stop(tp);
6369
6370         tg3_full_lock(tp, 1);
6371
6372         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6373                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6374                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6375                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6376                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6377         }
6378
6379         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6380         err = tg3_init_hw(tp, 1);
6381         if (err)
6382                 goto out;
6383
6384         tg3_netif_start(tp);
6385
6386 out:
6387         tg3_full_unlock(tp);
6388
6389         if (!err)
6390                 tg3_phy_start(tp);
6391
6392         tg3_flag_clear(tp, RESET_TASK_PENDING);
6393 }
6394
6395 static void tg3_tx_timeout(struct net_device *dev)
6396 {
6397         struct tg3 *tp = netdev_priv(dev);
6398
6399         if (netif_msg_tx_err(tp)) {
6400                 netdev_err(dev, "transmit timed out, resetting\n");
6401                 tg3_dump_state(tp);
6402         }
6403
6404         tg3_reset_task_schedule(tp);
6405 }
6406
6407 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6408 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6409 {
6410         u32 base = (u32) mapping & 0xffffffff;
6411
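        /* The buffer crosses a 4GB boundary when the 32-bit sum of base,
         * len, and an 8-byte safety margin wraps past base.  The first
         * comparison is only a cheap pre-filter for base addresses that
         * could possibly wrap given the largest supported buffer.
         */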
6412         return (base > 0xffffdcc0) && (base + len + 8 < base);
6413 }
6414
6415 /* Test for DMA addresses > 40-bit */
6416 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6417                                           int len)
6418 {
6419 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6420         if (tg3_flag(tp, 40BIT_DMA_BUG))
6421                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6422         return 0;
6423 #else
6424         return 0;
6425 #endif
6426 }
6427
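/* Fill one hardware TX buffer descriptor: the 64-bit DMA address is split
 * across addr_hi/addr_lo, while len shares a word with the flag bits and
 * mss shares one with the VLAN tag.
 */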
6428 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6429                                  dma_addr_t mapping, u32 len, u32 flags,
6430                                  u32 mss, u32 vlan)
6431 {
6432         txbd->addr_hi = ((u64) mapping >> 32);
6433         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6434         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6435         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6436 }
6437
6438 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6439                             dma_addr_t map, u32 len, u32 flags,
6440                             u32 mss, u32 vlan)
6441 {
6442         struct tg3 *tp = tnapi->tp;
6443         bool hwbug = false;
6444
6445         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6446                 hwbug = true;
6447
6448         if (tg3_4g_overflow_test(map, len))
6449                 hwbug = true;
6450
6451         if (tg3_40bit_overflow_test(tp, map, len))
6452                 hwbug = true;
6453
6454         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6455                 u32 prvidx = *entry;
6456                 u32 tmp_flag = flags & ~TXD_FLAG_END;
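                /* Chips with a 4K DMA FIFO limit get the buffer split into
                 * TG3_TX_BD_DMA_MAX chunks.  If the final remainder would
                 * fall into the troublesome 1..8 byte range, the current
                 * chunk is halved so both pieces stay above 8 bytes.
                 */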
6457                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6458                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6459                         len -= TG3_TX_BD_DMA_MAX;
6460
6461                         /* Avoid the 8-byte DMA problem */
6462                         if (len <= 8) {
6463                                 len += TG3_TX_BD_DMA_MAX / 2;
6464                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6465                         }
6466
6467                         tnapi->tx_buffers[*entry].fragmented = true;
6468
6469                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6470                                       frag_len, tmp_flag, mss, vlan);
6471                         *budget -= 1;
6472                         prvidx = *entry;
6473                         *entry = NEXT_TX(*entry);
6474
6475                         map += frag_len;
6476                 }
6477
6478                 if (len) {
6479                         if (*budget) {
6480                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6481                                               len, flags, mss, vlan);
6482                                 *budget -= 1;
6483                                 *entry = NEXT_TX(*entry);
6484                         } else {
6485                                 hwbug = true;
6486                                 tnapi->tx_buffers[prvidx].fragmented = false;
6487                         }
6488                 }
6489         } else {
6490                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6491                               len, flags, mss, vlan);
6492                 *entry = NEXT_TX(*entry);
6493         }
6494
6495         return hwbug;
6496 }
6497
6498 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6499 {
6500         int i;
6501         struct sk_buff *skb;
6502         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6503
6504         skb = txb->skb;
6505         txb->skb = NULL;
6506
6507         pci_unmap_single(tnapi->tp->pdev,
6508                          dma_unmap_addr(txb, mapping),
6509                          skb_headlen(skb),
6510                          PCI_DMA_TODEVICE);
6511
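        /* tg3_tx_frag_set() may have split this buffer across several
         * descriptors; walk past the extra ones, clearing the fragmented
         * markers as we go.
         */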
6512         while (txb->fragmented) {
6513                 txb->fragmented = false;
6514                 entry = NEXT_TX(entry);
6515                 txb = &tnapi->tx_buffers[entry];
6516         }
6517
6518         for (i = 0; i <= last; i++) {
6519                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6520
6521                 entry = NEXT_TX(entry);
6522                 txb = &tnapi->tx_buffers[entry];
6523
6524                 pci_unmap_page(tnapi->tp->pdev,
6525                                dma_unmap_addr(txb, mapping),
6526                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6527
6528                 while (txb->fragmented) {
6529                         txb->fragmented = false;
6530                         entry = NEXT_TX(entry);
6531                         txb = &tnapi->tx_buffers[entry];
6532                 }
6533         }
6534 }
6535
6536 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6537 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6538                                        struct sk_buff **pskb,
6539                                        u32 *entry, u32 *budget,
6540                                        u32 base_flags, u32 mss, u32 vlan)
6541 {
6542         struct tg3 *tp = tnapi->tp;
6543         struct sk_buff *new_skb, *skb = *pskb;
6544         dma_addr_t new_addr = 0;
6545         int ret = 0;
6546
6547         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6548                 new_skb = skb_copy(skb, GFP_ATOMIC);
6549         } else {
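                /* Extra headroom lets the copied data start on a 4-byte
                 * boundary, apparently to satisfy the 5701's DMA alignment
                 * restriction.
                 */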
6550                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6551
6552                 new_skb = skb_copy_expand(skb,
6553                                           skb_headroom(skb) + more_headroom,
6554                                           skb_tailroom(skb), GFP_ATOMIC);
6555         }
6556
6557         if (!new_skb) {
6558                 ret = -1;
6559         } else {
6560                 /* New SKB is guaranteed to be linear. */
6561                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6562                                           PCI_DMA_TODEVICE);
6563                 /* Make sure the mapping succeeded */
6564                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6565                         dev_kfree_skb(new_skb);
6566                         ret = -1;
6567                 } else {
6568                         u32 save_entry = *entry;
6569
6570                         base_flags |= TXD_FLAG_END;
6571
6572                         tnapi->tx_buffers[*entry].skb = new_skb;
6573                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6574                                            mapping, new_addr);
6575
6576                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6577                                             new_skb->len, base_flags,
6578                                             mss, vlan)) {
6579                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6580                                 dev_kfree_skb(new_skb);
6581                                 ret = -1;
6582                         }
6583                 }
6584         }
6585
6586         dev_kfree_skb(skb);
6587         *pskb = new_skb;
6588         return ret;
6589 }
6590
6591 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6592
6593 /* Use GSO to work around a rare TSO bug that may be triggered when the
6594  * TSO header is greater than 80 bytes.
6595  */
6596 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6597 {
6598         struct sk_buff *segs, *nskb;
6599         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6600
6601         /* Estimate the number of fragments in the worst case */
6602         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6603                 netif_stop_queue(tp->dev);
6604
6605                 /* netif_tx_stop_queue() must be done before checking
6606                  * tx index in tg3_tx_avail() below, because in
6607                  * tg3_tx(), we update tx index before checking for
6608                  * netif_tx_queue_stopped().
6609                  */
6610                 smp_mb();
6611                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6612                         return NETDEV_TX_BUSY;
6613
6614                 netif_wake_queue(tp->dev);
6615         }
6616
6617         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6618         if (IS_ERR(segs))
6619                 goto tg3_tso_bug_end;
6620
6621         do {
6622                 nskb = segs;
6623                 segs = segs->next;
6624                 nskb->next = NULL;
6625                 tg3_start_xmit(nskb, tp->dev);
6626         } while (segs);
6627
6628 tg3_tso_bug_end:
6629         dev_kfree_skb(skb);
6630
6631         return NETDEV_TX_OK;
6632 }
6633
6634 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6635  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6636  */
6637 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6638 {
6639         struct tg3 *tp = netdev_priv(dev);
6640         u32 len, entry, base_flags, mss, vlan = 0;
6641         u32 budget;
6642         int i = -1, would_hit_hwbug;
6643         dma_addr_t mapping;
6644         struct tg3_napi *tnapi;
6645         struct netdev_queue *txq;
6646         unsigned int last;
6647
6648         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6649         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6650         if (tg3_flag(tp, ENABLE_TSS))
6651                 tnapi++;
6652
6653         budget = tg3_tx_avail(tnapi);
6654
6655         /* We are running in BH disabled context with netif_tx_lock
6656          * and TX reclaim runs via tp->napi.poll inside of a software
6657          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6658          * no IRQ context deadlocks to worry about either.  Rejoice!
6659          */
6660         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6661                 if (!netif_tx_queue_stopped(txq)) {
6662                         netif_tx_stop_queue(txq);
6663
6664                         /* This is a hard error, log it. */
6665                         netdev_err(dev,
6666                                    "BUG! Tx Ring full when queue awake!\n");
6667                 }
6668                 return NETDEV_TX_BUSY;
6669         }
6670
6671         entry = tnapi->tx_prod;
6672         base_flags = 0;
6673         if (skb->ip_summed == CHECKSUM_PARTIAL)
6674                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6675
6676         mss = skb_shinfo(skb)->gso_size;
6677         if (mss) {
6678                 struct iphdr *iph;
6679                 u32 tcp_opt_len, hdr_len;
6680
6681                 if (skb_header_cloned(skb) &&
6682                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6683                         goto drop;
6684
6685                 iph = ip_hdr(skb);
6686                 tcp_opt_len = tcp_optlen(skb);
6687
6688                 if (skb_is_gso_v6(skb)) {
6689                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6690                 } else {
6691                         u32 ip_tcp_len;
6692
6693                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6694                         hdr_len = ip_tcp_len + tcp_opt_len;
6695
6696                         iph->check = 0;
6697                         iph->tot_len = htons(mss + hdr_len);
6698                 }
6699
6700                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6701                     tg3_flag(tp, TSO_BUG))
6702                         return tg3_tso_bug(tp, skb);
6703
6704                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6705                                TXD_FLAG_CPU_POST_DMA);
6706
6707                 if (tg3_flag(tp, HW_TSO_1) ||
6708                     tg3_flag(tp, HW_TSO_2) ||
6709                     tg3_flag(tp, HW_TSO_3)) {
6710                         tcp_hdr(skb)->check = 0;
6711                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6712                 } else
6713                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6714                                                                  iph->daddr, 0,
6715                                                                  IPPROTO_TCP,
6716                                                                  0);
6717
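                /* The hardware TSO variants need the LSO header length; each
                 * generation stashes it in different spare bits of the mss
                 * word and base_flags, hence the bit shuffling below.
                 */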
6718                 if (tg3_flag(tp, HW_TSO_3)) {
6719                         mss |= (hdr_len & 0xc) << 12;
6720                         if (hdr_len & 0x10)
6721                                 base_flags |= 0x00000010;
6722                         base_flags |= (hdr_len & 0x3e0) << 5;
6723                 } else if (tg3_flag(tp, HW_TSO_2))
6724                         mss |= hdr_len << 9;
6725                 else if (tg3_flag(tp, HW_TSO_1) ||
6726                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6727                         if (tcp_opt_len || iph->ihl > 5) {
6728                                 int tsflags;
6729
6730                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6731                                 mss |= (tsflags << 11);
6732                         }
6733                 } else {
6734                         if (tcp_opt_len || iph->ihl > 5) {
6735                                 int tsflags;
6736
6737                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6738                                 base_flags |= tsflags << 12;
6739                         }
6740                 }
6741         }
6742
6743         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6744             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6745                 base_flags |= TXD_FLAG_JMB_PKT;
6746
6747         if (vlan_tx_tag_present(skb)) {
6748                 base_flags |= TXD_FLAG_VLAN;
6749                 vlan = vlan_tx_tag_get(skb);
6750         }
6751
6752         len = skb_headlen(skb);
6753
6754         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6755         if (pci_dma_mapping_error(tp->pdev, mapping))
6756                 goto drop;
6757
6759         tnapi->tx_buffers[entry].skb = skb;
6760         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6761
6762         would_hit_hwbug = 0;
6763
6764         if (tg3_flag(tp, 5701_DMA_BUG))
6765                 would_hit_hwbug = 1;
6766
6767         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6768                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6769                             mss, vlan)) {
6770                 would_hit_hwbug = 1;
6771         /* Now loop through additional data fragments, and queue them. */
6772         } else if (skb_shinfo(skb)->nr_frags > 0) {
6773                 u32 tmp_mss = mss;
6774
6775                 if (!tg3_flag(tp, HW_TSO_1) &&
6776                     !tg3_flag(tp, HW_TSO_2) &&
6777                     !tg3_flag(tp, HW_TSO_3))
6778                         tmp_mss = 0;
6779
6780                 last = skb_shinfo(skb)->nr_frags - 1;
6781                 for (i = 0; i <= last; i++) {
6782                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6783
6784                         len = skb_frag_size(frag);
6785                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6786                                                    len, DMA_TO_DEVICE);
6787
6788                         tnapi->tx_buffers[entry].skb = NULL;
6789                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6790                                            mapping);
6791                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6792                                 goto dma_error;
6793
6794                         if (!budget ||
6795                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6796                                             len, base_flags |
6797                                             ((i == last) ? TXD_FLAG_END : 0),
6798                                             tmp_mss, vlan)) {
6799                                 would_hit_hwbug = 1;
6800                                 break;
6801                         }
6802                 }
6803         }
6804
6805         if (would_hit_hwbug) {
6806                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6807
6808                 /* If the workaround fails due to memory/mapping
6809                  * failure, silently drop this packet.
6810                  */
6811                 entry = tnapi->tx_prod;
6812                 budget = tg3_tx_avail(tnapi);
6813                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6814                                                 base_flags, mss, vlan))
6815                         goto drop_nofree;
6816         }
6817
6818         skb_tx_timestamp(skb);
6819
6820         /* Packets are ready, update Tx producer idx local and on card. */
6821         tw32_tx_mbox(tnapi->prodmbox, entry);
6822
6823         tnapi->tx_prod = entry;
6824         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6825                 netif_tx_stop_queue(txq);
6826
6827                 /* netif_tx_stop_queue() must be done before checking
6828                  * tx index in tg3_tx_avail() below, because in
6829                  * tg3_tx(), we update tx index before checking for
6830                  * netif_tx_queue_stopped().
6831                  */
6832                 smp_mb();
6833                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6834                         netif_tx_wake_queue(txq);
6835         }
6836
6837         mmiowb();
6838         return NETDEV_TX_OK;
6839
6840 dma_error:
6841         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6842         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6843 drop:
6844         dev_kfree_skb(skb);
6845 drop_nofree:
6846         tp->tx_dropped++;
6847         return NETDEV_TX_OK;
6848 }
6849
6850 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6851 {
6852         if (enable) {
6853                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6854                                   MAC_MODE_PORT_MODE_MASK);
6855
6856                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6857
6858                 if (!tg3_flag(tp, 5705_PLUS))
6859                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6860
6861                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6862                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6863                 else
6864                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6865         } else {
6866                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6867
6868                 if (tg3_flag(tp, 5705_PLUS) ||
6869                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6870                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6871                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6872         }
6873
6874         tw32(MAC_MODE, tp->mac_mode);
6875         udelay(40);
6876 }
6877
6878 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6879 {
6880         u32 val, bmcr, mac_mode, ptest = 0;
6881
6882         tg3_phy_toggle_apd(tp, false);
6883         tg3_phy_toggle_automdix(tp, 0);
6884
6885         if (extlpbk && tg3_phy_set_extloopbk(tp))
6886                 return -EIO;
6887
6888         bmcr = BMCR_FULLDPLX;
6889         switch (speed) {
6890         case SPEED_10:
6891                 break;
6892         case SPEED_100:
6893                 bmcr |= BMCR_SPEED100;
6894                 break;
6895         case SPEED_1000:
6896         default:
6897                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6898                         speed = SPEED_100;
6899                         bmcr |= BMCR_SPEED100;
6900                 } else {
6901                         speed = SPEED_1000;
6902                         bmcr |= BMCR_SPEED1000;
6903                 }
6904         }
6905
6906         if (extlpbk) {
6907                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6908                         tg3_readphy(tp, MII_CTRL1000, &val);
6909                         val |= CTL1000_AS_MASTER |
6910                                CTL1000_ENABLE_MASTER;
6911                         tg3_writephy(tp, MII_CTRL1000, val);
6912                 } else {
6913                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6914                                 MII_TG3_FET_PTEST_TRIM_2;
6915                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6916                 }
6917         } else
6918                 bmcr |= BMCR_LOOPBACK;
6919
6920         tg3_writephy(tp, MII_BMCR, bmcr);
6921
6922         /* The write needs to be flushed for the FETs */
6923         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6924                 tg3_readphy(tp, MII_BMCR, &bmcr);
6925
6926         udelay(40);
6927
6928         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6930                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6931                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6932                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6933
6934                 /* The write needs to be flushed for the AC131 */
6935                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6936         }
6937
6938         /* Reset to prevent intermittently losing the first rx packet */
6939         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6940             tg3_flag(tp, 5780_CLASS)) {
6941                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6942                 udelay(10);
6943                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6944         }
6945
6946         mac_mode = tp->mac_mode &
6947                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6948         if (speed == SPEED_1000)
6949                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6950         else
6951                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6952
6953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6954                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6955
6956                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6957                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6958                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6959                         mac_mode |= MAC_MODE_LINK_POLARITY;
6960
6961                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6962                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6963         }
6964
6965         tw32(MAC_MODE, mac_mode);
6966         udelay(40);
6967
6968         return 0;
6969 }
6970
6971 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6972 {
6973         struct tg3 *tp = netdev_priv(dev);
6974
6975         if (features & NETIF_F_LOOPBACK) {
6976                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6977                         return;
6978
6979                 spin_lock_bh(&tp->lock);
6980                 tg3_mac_loopback(tp, true);
6981                 netif_carrier_on(tp->dev);
6982                 spin_unlock_bh(&tp->lock);
6983                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6984         } else {
6985                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6986                         return;
6987
6988                 spin_lock_bh(&tp->lock);
6989                 tg3_mac_loopback(tp, false);
6990                 /* Force link status check */
6991                 tg3_setup_phy(tp, 1);
6992                 spin_unlock_bh(&tp->lock);
6993                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6994         }
6995 }
6996
6997 static netdev_features_t tg3_fix_features(struct net_device *dev,
6998         netdev_features_t features)
6999 {
7000         struct tg3 *tp = netdev_priv(dev);
7001
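        /* 5780-class chips cannot do TSO while jumbo frames are in use, so
         * mask off every TSO feature bit once the MTU exceeds the standard
         * Ethernet payload size.
         */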
7002         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7003                 features &= ~NETIF_F_ALL_TSO;
7004
7005         return features;
7006 }
7007
7008 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7009 {
7010         netdev_features_t changed = dev->features ^ features;
7011
7012         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7013                 tg3_set_loopback(dev, features);
7014
7015         return 0;
7016 }
7017
7018 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7019                                int new_mtu)
7020 {
7021         dev->mtu = new_mtu;
7022
7023         if (new_mtu > ETH_DATA_LEN) {
7024                 if (tg3_flag(tp, 5780_CLASS)) {
7025                         netdev_update_features(dev);
7026                         tg3_flag_clear(tp, TSO_CAPABLE);
7027                 } else {
7028                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7029                 }
7030         } else {
7031                 if (tg3_flag(tp, 5780_CLASS)) {
7032                         tg3_flag_set(tp, TSO_CAPABLE);
7033                         netdev_update_features(dev);
7034                 }
7035                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7036         }
7037 }
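
/* Note the ordering above: when growing past ETH_DATA_LEN,
 * netdev_update_features() runs first so tg3_fix_features() (keyed off
 * the already-updated dev->mtu) can strip the TSO bits; when shrinking
 * back down, TSO_CAPABLE is restored before the features are recomputed.
 */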
7038
7039 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7040 {
7041         struct tg3 *tp = netdev_priv(dev);
7042         int err;
7043
7044         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7045                 return -EINVAL;
7046
7047         if (!netif_running(dev)) {
7048                 /* We'll just catch it later when the
7049                  * device is brought up.
7050                  */
7051                 tg3_set_mtu(dev, tp, new_mtu);
7052                 return 0;
7053         }
7054
7055         tg3_phy_stop(tp);
7056
7057         tg3_netif_stop(tp);
7058
7059         tg3_full_lock(tp, 1);
7060
7061         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7062
7063         tg3_set_mtu(dev, tp, new_mtu);
7064
7065         err = tg3_restart_hw(tp, 0);
7066
7067         if (!err)
7068                 tg3_netif_start(tp);
7069
7070         tg3_full_unlock(tp);
7071
7072         if (!err)
7073                 tg3_phy_start(tp);
7074
7075         return err;
7076 }
7077
7078 static void tg3_rx_prodring_free(struct tg3 *tp,
7079                                  struct tg3_rx_prodring_set *tpr)
7080 {
7081         int i;
7082
7083         if (tpr != &tp->napi[0].prodring) {
7084                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7085                      i = (i + 1) & tp->rx_std_ring_mask)
7086                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7087                                         tp->rx_pkt_map_sz);
7088
7089                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7090                         for (i = tpr->rx_jmb_cons_idx;
7091                              i != tpr->rx_jmb_prod_idx;
7092                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7093                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7094                                                 TG3_RX_JMB_MAP_SZ);
7095                         }
7096                 }
7097
7098                 return;
7099         }
7100
7101         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7102                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7103                                 tp->rx_pkt_map_sz);
7104
7105         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7106                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7107                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7108                                         TG3_RX_JMB_MAP_SZ);
7109         }
7110 }
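
/* The wrap arithmetic above relies on the ring sizes being powers of
 * two: rx_std_ring_mask and rx_jmb_ring_mask are (ring size - 1), so
 * "(i + 1) & mask" advances an index with wraparound.  Only the default
 * (vector 0) producer ring is swept in full; the per-vector rings just
 * release the window between their consumer and producer indexes.
 */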
7111
7112 /* Initialize rx rings for packet processing.
7113  *
7114  * The chip has been shut down and the driver detached from
7115  * the networking stack, so no interrupts or new tx packets will
7116  * end up in the driver.  tp->{tx,}lock are held and thus
7117  * we may not sleep.
7118  */
7119 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7120                                  struct tg3_rx_prodring_set *tpr)
7121 {
7122         u32 i, rx_pkt_dma_sz;
7123
7124         tpr->rx_std_cons_idx = 0;
7125         tpr->rx_std_prod_idx = 0;
7126         tpr->rx_jmb_cons_idx = 0;
7127         tpr->rx_jmb_prod_idx = 0;
7128
7129         if (tpr != &tp->napi[0].prodring) {
7130                 memset(&tpr->rx_std_buffers[0], 0,
7131                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7132                 if (tpr->rx_jmb_buffers)
7133                         memset(&tpr->rx_jmb_buffers[0], 0,
7134                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7135                 goto done;
7136         }
7137
7138         /* Zero out all descriptors. */
7139         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7140
7141         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7142         if (tg3_flag(tp, 5780_CLASS) &&
7143             tp->dev->mtu > ETH_DATA_LEN)
7144                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7145         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7146
7147         /* Initialize invariants of the rings; we only set this
7148          * stuff once.  This works because the card does not
7149          * write into the rx buffer posting rings.
7150          */
7151         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7152                 struct tg3_rx_buffer_desc *rxd;
7153
7154                 rxd = &tpr->rx_std[i];
7155                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7156                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7157                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7158                                (i << RXD_OPAQUE_INDEX_SHIFT));
7159         }
7160
7161         /* Now allocate fresh SKBs for each rx ring. */
7162         for (i = 0; i < tp->rx_pending; i++) {
7163                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7164                         netdev_warn(tp->dev,
7165                                     "Using a smaller RX standard ring. Only "
7166                                     "%d out of %d buffers were allocated "
7167                                     "successfully\n", i, tp->rx_pending);
7168                         if (i == 0)
7169                                 goto initfail;
7170                         tp->rx_pending = i;
7171                         break;
7172                 }
7173         }
7174
7175         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7176                 goto done;
7177
7178         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7179
7180         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7181                 goto done;
7182
7183         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7184                 struct tg3_rx_buffer_desc *rxd;
7185
7186                 rxd = &tpr->rx_jmb[i].std;
7187                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7188                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7189                                   RXD_FLAG_JUMBO;
7190                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7191                                (i << RXD_OPAQUE_INDEX_SHIFT));
7192         }
7193
7194         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7195                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7196                         netdev_warn(tp->dev,
7197                                     "Using a smaller RX jumbo ring. Only %d "
7198                                     "out of %d buffers were allocated "
7199                                     "successfully\n", i, tp->rx_jumbo_pending);
7200                         if (i == 0)
7201                                 goto initfail;
7202                         tp->rx_jumbo_pending = i;
7203                         break;
7204                 }
7205         }
7206
7207 done:
7208         return 0;
7209
7210 initfail:
7211         tg3_rx_prodring_free(tp, tpr);
7212         return -ENOMEM;
7213 }
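
/* The opaque field stamped into each descriptor above encodes the ring
 * type plus the buffer index, e.g. for standard-ring entry 5
 * (illustrative values only):
 *
 *	opaque = RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT);
 *
 * The rx completion path decodes this to find which buffer array and
 * slot a finished descriptor belongs to.
 */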
7214
7215 static void tg3_rx_prodring_fini(struct tg3 *tp,
7216                                  struct tg3_rx_prodring_set *tpr)
7217 {
7218         kfree(tpr->rx_std_buffers);
7219         tpr->rx_std_buffers = NULL;
7220         kfree(tpr->rx_jmb_buffers);
7221         tpr->rx_jmb_buffers = NULL;
7222         if (tpr->rx_std) {
7223                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7224                                   tpr->rx_std, tpr->rx_std_mapping);
7225                 tpr->rx_std = NULL;
7226         }
7227         if (tpr->rx_jmb) {
7228                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7229                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7230                 tpr->rx_jmb = NULL;
7231         }
7232 }
7233
7234 static int tg3_rx_prodring_init(struct tg3 *tp,
7235                                 struct tg3_rx_prodring_set *tpr)
7236 {
7237         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7238                                       GFP_KERNEL);
7239         if (!tpr->rx_std_buffers)
7240                 return -ENOMEM;
7241
7242         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7243                                          TG3_RX_STD_RING_BYTES(tp),
7244                                          &tpr->rx_std_mapping,
7245                                          GFP_KERNEL);
7246         if (!tpr->rx_std)
7247                 goto err_out;
7248
7249         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7250                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7251                                               GFP_KERNEL);
7252                 if (!tpr->rx_jmb_buffers)
7253                         goto err_out;
7254
7255                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7256                                                  TG3_RX_JMB_RING_BYTES(tp),
7257                                                  &tpr->rx_jmb_mapping,
7258                                                  GFP_KERNEL);
7259                 if (!tpr->rx_jmb)
7260                         goto err_out;
7261         }
7262
7263         return 0;
7264
7265 err_out:
7266         tg3_rx_prodring_fini(tp, tpr);
7267         return -ENOMEM;
7268 }
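
/* The err_out path may call tg3_rx_prodring_fini() on a half-built set;
 * that is safe because fini NULL-checks both DMA rings and kfree()
 * tolerates NULL buffer arrays, so partial allocations unwind cleanly.
 */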
7269
7270 /* Free up pending packets in all rx/tx rings.
7271  *
7272  * The chip has been shut down and the driver detached from
7273  * the networking, so no interrupts or new tx packets will
7274  * the networking stack, so no interrupts or new tx packets will
7275  * in an interrupt context and thus may sleep.
7276  */
7277 static void tg3_free_rings(struct tg3 *tp)
7278 {
7279         int i, j;
7280
7281         for (j = 0; j < tp->irq_cnt; j++) {
7282                 struct tg3_napi *tnapi = &tp->napi[j];
7283
7284                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7285
7286                 if (!tnapi->tx_buffers)
7287                         continue;
7288
7289                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7290                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7291
7292                         if (!skb)
7293                                 continue;
7294
7295                         tg3_tx_skb_unmap(tnapi, i,
7296                                          skb_shinfo(skb)->nr_frags - 1);
7297
7298                         dev_kfree_skb_any(skb);
7299                 }
7300         }
7301 }
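
/* tg3_tx_skb_unmap() takes the index of the last fragment to unmap, so
 * passing nr_frags - 1 releases the head mapping plus every fragment
 * mapping before the skb itself is freed.
 */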
7302
7303 /* Initialize tx/rx rings for packet processing.
7304  *
7305  * The chip has been shut down and the driver detached from
7306  * the networking stack, so no interrupts or new tx packets will
7307  * end up in the driver.  tp->{tx,}lock are held and thus
7308  * we may not sleep.
7309  */
7310 static int tg3_init_rings(struct tg3 *tp)
7311 {
7312         int i;
7313
7314         /* Free up all the SKBs. */
7315         tg3_free_rings(tp);
7316
7317         for (i = 0; i < tp->irq_cnt; i++) {
7318                 struct tg3_napi *tnapi = &tp->napi[i];
7319
7320                 tnapi->last_tag = 0;
7321                 tnapi->last_irq_tag = 0;
7322                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7325
7326                 tnapi->tx_prod = 0;
7327                 tnapi->tx_cons = 0;
7328                 if (tnapi->tx_ring)
7329                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7330
7331                 tnapi->rx_rcb_ptr = 0;
7332                 if (tnapi->rx_rcb)
7333                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7334
7335                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7336                         tg3_free_rings(tp);
7337                         return -ENOMEM;
7338                 }
7339         }
7340
7341         return 0;
7342 }
7343
7344 /*
7345  * Must not be invoked with interrupt sources disabled and
7346  * the hardware shut down.
7347  */
7348 static void tg3_free_consistent(struct tg3 *tp)
7349 {
7350         int i;
7351
7352         for (i = 0; i < tp->irq_cnt; i++) {
7353                 struct tg3_napi *tnapi = &tp->napi[i];
7354
7355                 if (tnapi->tx_ring) {
7356                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7357                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7358                         tnapi->tx_ring = NULL;
7359                 }
7360
7361                 kfree(tnapi->tx_buffers);
7362                 tnapi->tx_buffers = NULL;
7363
7364                 if (tnapi->rx_rcb) {
7365                         dma_free_coherent(&tp->pdev->dev,
7366                                           TG3_RX_RCB_RING_BYTES(tp),
7367                                           tnapi->rx_rcb,
7368                                           tnapi->rx_rcb_mapping);
7369                         tnapi->rx_rcb = NULL;
7370                 }
7371
7372                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7373
7374                 if (tnapi->hw_status) {
7375                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7376                                           tnapi->hw_status,
7377                                           tnapi->status_mapping);
7378                         tnapi->hw_status = NULL;
7379                 }
7380         }
7381
7382         if (tp->hw_stats) {
7383                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7384                                   tp->hw_stats, tp->stats_mapping);
7385                 tp->hw_stats = NULL;
7386         }
7387 }
7388
7389 /*
7390  * Must not be invoked with interrupt sources disabled and
7391  * the hardware shut down.  Can sleep.
7392  */
7393 static int tg3_alloc_consistent(struct tg3 *tp)
7394 {
7395         int i;
7396
7397         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7398                                           sizeof(struct tg3_hw_stats),
7399                                           &tp->stats_mapping,
7400                                           GFP_KERNEL);
7401         if (!tp->hw_stats)
7402                 goto err_out;
7403
7404         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7405
7406         for (i = 0; i < tp->irq_cnt; i++) {
7407                 struct tg3_napi *tnapi = &tp->napi[i];
7408                 struct tg3_hw_status *sblk;
7409
7410                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7411                                                       TG3_HW_STATUS_SIZE,
7412                                                       &tnapi->status_mapping,
7413                                                       GFP_KERNEL);
7414                 if (!tnapi->hw_status)
7415                         goto err_out;
7416
7417                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7418                 sblk = tnapi->hw_status;
7419
7420                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7421                         goto err_out;
7422
7423                 /* If multivector TSS is enabled, vector 0 does not handle
7424                  * tx interrupts.  Don't allocate any resources for it.
7425                  */
7426                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7427                     (i && tg3_flag(tp, ENABLE_TSS))) {
7428                         tnapi->tx_buffers = kzalloc(
7429                                                sizeof(struct tg3_tx_ring_info) *
7430                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7431                         if (!tnapi->tx_buffers)
7432                                 goto err_out;
7433
7434                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7435                                                             TG3_TX_RING_BYTES,
7436                                                         &tnapi->tx_desc_mapping,
7437                                                             GFP_KERNEL);
7438                         if (!tnapi->tx_ring)
7439                                 goto err_out;
7440                 }
7441
7442                 /*
7443                  * When RSS is enabled, the status block format changes
7444                  * slightly.  The "rx_jumbo_consumer", "reserved",
7445                  * and "rx_mini_consumer" members get mapped to the
7446                  * other three rx return ring producer indexes.
7447                  */
7448                 switch (i) {
7449                 default:
7450                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7451                         break;
7452                 case 2:
7453                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7454                         break;
7455                 case 3:
7456                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7457                         break;
7458                 case 4:
7459                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7460                         break;
7461                 }
7462
7463                 /*
7464                  * If multivector RSS is enabled, vector 0 does not handle
7465                  * rx or tx interrupts.  Don't allocate any resources for it.
7466                  */
7467                 if (!i && tg3_flag(tp, ENABLE_RSS))
7468                         continue;
7469
7470                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7471                                                    TG3_RX_RCB_RING_BYTES(tp),
7472                                                    &tnapi->rx_rcb_mapping,
7473                                                    GFP_KERNEL);
7474                 if (!tnapi->rx_rcb)
7475                         goto err_out;
7476
7477                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7478         }
7479
7480         return 0;
7481
7482 err_out:
7483         tg3_free_consistent(tp);
7484         return -ENOMEM;
7485 }
7486
7487 #define MAX_WAIT_CNT 1000
7488
7489 /* To stop a block, clear the enable bit and poll until it
7490  * clears (up to MAX_WAIT_CNT * 100us = 100ms).  tp->lock is held.
7491  */
7492 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7493 {
7494         unsigned int i;
7495         u32 val;
7496
7497         if (tg3_flag(tp, 5705_PLUS)) {
7498                 switch (ofs) {
7499                 case RCVLSC_MODE:
7500                 case DMAC_MODE:
7501                 case MBFREE_MODE:
7502                 case BUFMGR_MODE:
7503                 case MEMARB_MODE:
7504                         /* We can't enable/disable these bits of the
7505                          * 5705/5750; just say success.
7506                          */
7507                         return 0;
7508
7509                 default:
7510                         break;
7511                 }
7512         }
7513
7514         val = tr32(ofs);
7515         val &= ~enable_bit;
7516         tw32_f(ofs, val);
7517
7518         for (i = 0; i < MAX_WAIT_CNT; i++) {
7519                 udelay(100);
7520                 val = tr32(ofs);
7521                 if ((val & enable_bit) == 0)
7522                         break;
7523         }
7524
7525         if (i == MAX_WAIT_CNT && !silent) {
7526                 dev_err(&tp->pdev->dev,
7527                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7528                         ofs, enable_bit);
7529                 return -ENODEV;
7530         }
7531
7532         return 0;
7533 }
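
/* The callers below simply OR the tg3_stop_block() return values
 * together: any single block that fails to stop leaves err nonzero
 * (-ENODEV) while the remaining blocks are still given a chance to
 * stop.
 */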
7534
7535 /* tp->lock is held. */
7536 static int tg3_abort_hw(struct tg3 *tp, int silent)
7537 {
7538         int i, err;
7539
7540         tg3_disable_ints(tp);
7541
7542         tp->rx_mode &= ~RX_MODE_ENABLE;
7543         tw32_f(MAC_RX_MODE, tp->rx_mode);
7544         udelay(10);
7545
7546         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7547         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7548         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7549         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7550         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7551         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7552
7553         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7554         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7555         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7556         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7557         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7558         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7559         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7560
7561         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7562         tw32_f(MAC_MODE, tp->mac_mode);
7563         udelay(40);
7564
7565         tp->tx_mode &= ~TX_MODE_ENABLE;
7566         tw32_f(MAC_TX_MODE, tp->tx_mode);
7567
7568         for (i = 0; i < MAX_WAIT_CNT; i++) {
7569                 udelay(100);
7570                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7571                         break;
7572         }
7573         if (i >= MAX_WAIT_CNT) {
7574                 dev_err(&tp->pdev->dev,
7575                         "%s timed out, TX_MODE_ENABLE will not clear "
7576                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7577                 err |= -ENODEV;
7578         }
7579
7580         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7581         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7582         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7583
7584         tw32(FTQ_RESET, 0xffffffff);
7585         tw32(FTQ_RESET, 0x00000000);
7586
7587         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7588         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7589
7590         for (i = 0; i < tp->irq_cnt; i++) {
7591                 struct tg3_napi *tnapi = &tp->napi[i];
7592                 if (tnapi->hw_status)
7593                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7594         }
7595         if (tp->hw_stats)
7596                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7597
7598         return err;
7599 }
7600
7601 /* Save PCI command register before chip reset */
7602 static void tg3_save_pci_state(struct tg3 *tp)
7603 {
7604         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7605 }
7606
7607 /* Restore PCI state after chip reset */
7608 static void tg3_restore_pci_state(struct tg3 *tp)
7609 {
7610         u32 val;
7611
7612         /* Re-enable indirect register accesses. */
7613         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7614                                tp->misc_host_ctrl);
7615
7616         /* Set MAX PCI retry to zero. */
7617         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7618         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7619             tg3_flag(tp, PCIX_MODE))
7620                 val |= PCISTATE_RETRY_SAME_DMA;
7621         /* Allow reads and writes to the APE register and memory space. */
7622         if (tg3_flag(tp, ENABLE_APE))
7623                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7624                        PCISTATE_ALLOW_APE_SHMEM_WR |
7625                        PCISTATE_ALLOW_APE_PSPACE_WR;
7626         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7627
7628         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7629
7630         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7631                 if (tg3_flag(tp, PCI_EXPRESS))
7632                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7633                 else {
7634                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7635                                               tp->pci_cacheline_sz);
7636                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7637                                               tp->pci_lat_timer);
7638                 }
7639         }
7640
7641         /* Make sure PCI-X relaxed ordering bit is clear. */
7642         if (tg3_flag(tp, PCIX_MODE)) {
7643                 u16 pcix_cmd;
7644
7645                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7646                                      &pcix_cmd);
7647                 pcix_cmd &= ~PCI_X_CMD_ERO;
7648                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7649                                       pcix_cmd);
7650         }
7651
7652         if (tg3_flag(tp, 5780_CLASS)) {
7653
7654                 /* Chip reset on 5780 will reset MSI enable bit,
7655                  * so need to restore it.
7656                  */
7657                 if (tg3_flag(tp, USING_MSI)) {
7658                         u16 ctrl;
7659
7660                         pci_read_config_word(tp->pdev,
7661                                              tp->msi_cap + PCI_MSI_FLAGS,
7662                                              &ctrl);
7663                         pci_write_config_word(tp->pdev,
7664                                               tp->msi_cap + PCI_MSI_FLAGS,
7665                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7666                         val = tr32(MSGINT_MODE);
7667                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7668                 }
7669         }
7670 }
7671
7672 /* tp->lock is held. */
7673 static int tg3_chip_reset(struct tg3 *tp)
7674 {
7675         u32 val;
7676         void (*write_op)(struct tg3 *, u32, u32);
7677         int i, err;
7678
7679         tg3_nvram_lock(tp);
7680
7681         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7682
7683         /* No matching tg3_nvram_unlock() after this because
7684          * chip reset below will undo the nvram lock.
7685          * the chip reset below will undo the nvram lock.
7686         tp->nvram_lock_cnt = 0;
7687
7688         /* GRC_MISC_CFG core clock reset will clear the memory
7689          * enable bit in PCI register 4 and the MSI enable bit
7690          * on some chips, so we save relevant registers here.
7691          */
7692         tg3_save_pci_state(tp);
7693
7694         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7695             tg3_flag(tp, 5755_PLUS))
7696                 tw32(GRC_FASTBOOT_PC, 0);
7697
7698         /*
7699          * We must avoid the readl() that normally takes place.
7700          * It can lock up machines, cause machine checks, and
7701          * trigger other fun things.  So, temporarily disable the
7702          * 5701 hardware workaround while we do the reset.
7703          */
7704         write_op = tp->write32;
7705         if (write_op == tg3_write_flush_reg32)
7706                 tp->write32 = tg3_write32;
7707
7708         /* Prevent the irq handler from reading or writing PCI registers
7709          * during chip reset when the memory enable bit in the PCI command
7710          * register may be cleared.  The chip does not generate interrupt
7711          * at this time, but the irq handler may still be called due to irq
7712          * sharing or irqpoll.
7713          */
7714         tg3_flag_set(tp, CHIP_RESETTING);
7715         for (i = 0; i < tp->irq_cnt; i++) {
7716                 struct tg3_napi *tnapi = &tp->napi[i];
7717                 if (tnapi->hw_status) {
7718                         tnapi->hw_status->status = 0;
7719                         tnapi->hw_status->status_tag = 0;
7720                 }
7721                 tnapi->last_tag = 0;
7722                 tnapi->last_irq_tag = 0;
7723         }
7724         smp_mb();
7725
7726         for (i = 0; i < tp->irq_cnt; i++)
7727                 synchronize_irq(tp->napi[i].irq_vec);
7728
7729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7730                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7731                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7732         }
7733
7734         /* do the reset */
7735         val = GRC_MISC_CFG_CORECLK_RESET;
7736
7737         if (tg3_flag(tp, PCI_EXPRESS)) {
7738                 /* Force PCIe 1.0a mode */
7739                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7740                     !tg3_flag(tp, 57765_PLUS) &&
7741                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7742                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7743                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7744
7745                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7746                         tw32(GRC_MISC_CFG, (1 << 29));
7747                         val |= (1 << 29);
7748                 }
7749         }
7750
7751         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7752                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7753                 tw32(GRC_VCPU_EXT_CTRL,
7754                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7755         }
7756
7757         /* Manage gphy power for all CPMU-absent PCIe devices. */
7758         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7759                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7760
7761         tw32(GRC_MISC_CFG, val);
7762
7763         /* restore 5701 hardware bug workaround write method */
7764         tp->write32 = write_op;
7765
7766         /* Unfortunately, we have to delay before the PCI read back.
7767          * Some 575X chips will not even respond to a PCI cfg access
7768          * when the reset command is given to the chip.
7769          *
7770          * How do these hardware designers expect things to work
7771          * properly if the PCI write is posted for a long period
7772          * of time?  It is always necessary to have some method by
7773          * which a register read back can occur to push the write
7774          * out which does the reset.
7775          *
7776          * For most tg3 variants the trick below was working.
7777          * Ho hum...
7778          */
7779         udelay(120);
7780
7781         /* Flush PCI posted writes.  The normal MMIO registers
7782          * are inaccessible at this time so this is the only
7783          * way to do this reliably (actually, this is no longer
7784          * the case, see above).  I tried to use indirect
7785          * register read/write but this upset some 5701 variants.
7786          */
7787         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7788
7789         udelay(120);
7790
7791         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7792                 u16 val16;
7793
7794                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7795                         int i;
7796                         u32 cfg_val;
7797
7798                         /* Wait for link training to complete (5000 * 100us = 500ms). */
7799                         for (i = 0; i < 5000; i++)
7800                                 udelay(100);
7801
7802                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7803                         pci_write_config_dword(tp->pdev, 0xc4,
7804                                                cfg_val | (1 << 15));
7805                 }
7806
7807                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7808                 pci_read_config_word(tp->pdev,
7809                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7810                                      &val16);
7811                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7812                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7813                 /*
7814                  * Older PCIe devices only support the 128 byte
7815                  * MPS setting.  Enforce the restriction.
7816                  */
7817                 if (!tg3_flag(tp, CPMU_PRESENT))
7818                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7819                 pci_write_config_word(tp->pdev,
7820                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7821                                       val16);
7822
7823                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7824
7825                 /* Clear error status */
7826                 pci_write_config_word(tp->pdev,
7827                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7828                                       PCI_EXP_DEVSTA_CED |
7829                                       PCI_EXP_DEVSTA_NFED |
7830                                       PCI_EXP_DEVSTA_FED |
7831                                       PCI_EXP_DEVSTA_URD);
7832         }
7833
7834         tg3_restore_pci_state(tp);
7835
7836         tg3_flag_clear(tp, CHIP_RESETTING);
7837         tg3_flag_clear(tp, ERROR_PROCESSED);
7838
7839         val = 0;
7840         if (tg3_flag(tp, 5780_CLASS))
7841                 val = tr32(MEMARB_MODE);
7842         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7843
7844         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7845                 tg3_stop_fw(tp);
7846                 tw32(0x5000, 0x400);
7847         }
7848
7849         tw32(GRC_MODE, tp->grc_mode);
7850
7851         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7852                 val = tr32(0xc4);
7853
7854                 tw32(0xc4, val | (1 << 15));
7855         }
7856
7857         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7859                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7860                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7861                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7862                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7863         }
7864
7865         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7866                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7867                 val = tp->mac_mode;
7868         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7869                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7870                 val = tp->mac_mode;
7871         } else
7872                 val = 0;
7873
7874         tw32_f(MAC_MODE, val);
7875         udelay(40);
7876
7877         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7878
7879         err = tg3_poll_fw(tp);
7880         if (err)
7881                 return err;
7882
7883         tg3_mdio_start(tp);
7884
7885         if (tg3_flag(tp, PCI_EXPRESS) &&
7886             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7887             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7888             !tg3_flag(tp, 57765_PLUS)) {
7889                 val = tr32(0x7c00);
7890
7891                 tw32(0x7c00, val | (1 << 25));
7892         }
7893
7894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7895                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7896                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7897         }
7898
7899         /* Reprobe ASF enable state.  */
7900         tg3_flag_clear(tp, ENABLE_ASF);
7901         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7902         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7903         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7904                 u32 nic_cfg;
7905
7906                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7907                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7908                         tg3_flag_set(tp, ENABLE_ASF);
7909                         tp->last_event_jiffies = jiffies;
7910                         if (tg3_flag(tp, 5750_PLUS))
7911                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7912                 }
7913         }
7914
7915         return 0;
7916 }
7917
7918 /* tp->lock is held. */
7919 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7920 {
7921         int err;
7922
7923         tg3_stop_fw(tp);
7924
7925         tg3_write_sig_pre_reset(tp, kind);
7926
7927         tg3_abort_hw(tp, silent);
7928         err = tg3_chip_reset(tp);
7929
7930         __tg3_set_mac_addr(tp, 0);
7931
7932         tg3_write_sig_legacy(tp, kind);
7933         tg3_write_sig_post_reset(tp, kind);
7934
7935         if (err)
7936                 return err;
7937
7938         return 0;
7939 }
7940
7941 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7942 {
7943         struct tg3 *tp = netdev_priv(dev);
7944         struct sockaddr *addr = p;
7945         int err = 0, skip_mac_1 = 0;
7946
7947         if (!is_valid_ether_addr(addr->sa_data))
7948                 return -EINVAL;
7949
7950         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7951
7952         if (!netif_running(dev))
7953                 return 0;
7954
7955         if (tg3_flag(tp, ENABLE_ASF)) {
7956                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7957
7958                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7959                 addr0_low = tr32(MAC_ADDR_0_LOW);
7960                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7961                 addr1_low = tr32(MAC_ADDR_1_LOW);
7962
7963                 /* Skip MAC addr 1 if ASF is using it. */
7964                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7965                     !(addr1_high == 0 && addr1_low == 0))
7966                         skip_mac_1 = 1;
7967         }
7968         spin_lock_bh(&tp->lock);
7969         __tg3_set_mac_addr(tp, skip_mac_1);
7970         spin_unlock_bh(&tp->lock);
7971
7972         return err;
7973 }
7974
7975 /* tp->lock is held. */
7976 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7977                            dma_addr_t mapping, u32 maxlen_flags,
7978                            u32 nic_addr)
7979 {
7980         tg3_write_mem(tp,
7981                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7982                       ((u64) mapping >> 32));
7983         tg3_write_mem(tp,
7984                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7985                       ((u64) mapping & 0xffffffff));
7986         tg3_write_mem(tp,
7987                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7988                        maxlen_flags);
7989
7990         if (!tg3_flag(tp, 5705_PLUS))
7991                 tg3_write_mem(tp,
7992                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7993                               nic_addr);
7994 }
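
/* A BDINFO block in NIC SRAM is four consecutive 32-bit words: the
 * 64-bit host ring DMA address (high word first), a combined
 * maxlen/flags word, and a NIC-local ring address.  The helper above
 * fills them in that order; the NIC address word is skipped on
 * 5705-and-newer parts, which do not use NIC-resident send rings.
 */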
7995
7996 static void __tg3_set_rx_mode(struct net_device *);
7997 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7998 {
7999         int i;
8000
8001         if (!tg3_flag(tp, ENABLE_TSS)) {
8002                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8003                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8004                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8005         } else {
8006                 tw32(HOSTCC_TXCOL_TICKS, 0);
8007                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8008                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8009         }
8010
8011         if (!tg3_flag(tp, ENABLE_RSS)) {
8012                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8013                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8014                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8015         } else {
8016                 tw32(HOSTCC_RXCOL_TICKS, 0);
8017                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8018                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8019         }
8020
8021         if (!tg3_flag(tp, 5705_PLUS)) {
8022                 u32 val = ec->stats_block_coalesce_usecs;
8023
8024                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8025                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8026
8027                 if (!netif_carrier_ok(tp->dev))
8028                         val = 0;
8029
8030                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8031         }
8032
8033         for (i = 0; i < tp->irq_cnt - 1; i++) {
8034                 u32 reg;
8035
8036                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8037                 tw32(reg, ec->rx_coalesce_usecs);
8038                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8039                 tw32(reg, ec->rx_max_coalesced_frames);
8040                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8041                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8042
8043                 if (tg3_flag(tp, ENABLE_TSS)) {
8044                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8045                         tw32(reg, ec->tx_coalesce_usecs);
8046                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8047                         tw32(reg, ec->tx_max_coalesced_frames);
8048                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8049                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8050                 }
8051         }
8052
8053         for (; i < tp->irq_max - 1; i++) {
8054                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8055                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8056                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8057
8058                 if (tg3_flag(tp, ENABLE_TSS)) {
8059                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8060                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8061                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8062                 }
8063         }
8064 }
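
/* The per-vector coalescing registers sit at a fixed 0x18-byte stride
 * from the *_VEC1 addresses, so (illustratively) the rx tick register
 * for vector 2 is HOSTCC_RXCOL_TICKS_VEC1 + 1 * 0x18.
 */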
8065
8066 /* tp->lock is held. */
8067 static void tg3_rings_reset(struct tg3 *tp)
8068 {
8069         int i;
8070         u32 stblk, txrcb, rxrcb, limit;
8071         struct tg3_napi *tnapi = &tp->napi[0];
8072
8073         /* Disable all transmit rings but the first. */
8074         if (!tg3_flag(tp, 5705_PLUS))
8075                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8076         else if (tg3_flag(tp, 5717_PLUS))
8077                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8078         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8079                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8080         else
8081                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8082
8083         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8084              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8085                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8086                               BDINFO_FLAGS_DISABLED);
8087
8089         /* Disable all receive return rings but the first. */
8090         if (tg3_flag(tp, 5717_PLUS))
8091                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8092         else if (!tg3_flag(tp, 5705_PLUS))
8093                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8094         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8095                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8096                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8097         else
8098                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8099
8100         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8101              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8102                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8103                               BDINFO_FLAGS_DISABLED);
8104
8105         /* Disable interrupts */
8106         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8107         tp->napi[0].chk_msi_cnt = 0;
8108         tp->napi[0].last_rx_cons = 0;
8109         tp->napi[0].last_tx_cons = 0;
8110
8111         /* Zero mailbox registers. */
8112         if (tg3_flag(tp, SUPPORT_MSIX)) {
8113                 for (i = 1; i < tp->irq_max; i++) {
8114                         tp->napi[i].tx_prod = 0;
8115                         tp->napi[i].tx_cons = 0;
8116                         if (tg3_flag(tp, ENABLE_TSS))
8117                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8118                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8119                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8120                         tp->napi[i].chk_msi_cnt = 0;
8121                         tp->napi[i].last_rx_cons = 0;
8122                         tp->napi[i].last_tx_cons = 0;
8123                 }
8124                 if (!tg3_flag(tp, ENABLE_TSS))
8125                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8126         } else {
8127                 tp->napi[0].tx_prod = 0;
8128                 tp->napi[0].tx_cons = 0;
8129                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8130                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8131         }
8132
8133         /* Make sure the NIC-based send BD rings are disabled. */
8134         if (!tg3_flag(tp, 5705_PLUS)) {
8135                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8136                 for (i = 0; i < 16; i++)
8137                         tw32_tx_mbox(mbox + i * 8, 0);
8138         }
8139
8140         txrcb = NIC_SRAM_SEND_RCB;
8141         rxrcb = NIC_SRAM_RCV_RET_RCB;
8142
8143         /* Clear status block in ram. */
8144         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8145
8146         /* Set status block DMA address */
8147         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8148              ((u64) tnapi->status_mapping >> 32));
8149         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8150              ((u64) tnapi->status_mapping & 0xffffffff));
8151
8152         if (tnapi->tx_ring) {
8153                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8154                                (TG3_TX_RING_SIZE <<
8155                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8156                                NIC_SRAM_TX_BUFFER_DESC);
8157                 txrcb += TG3_BDINFO_SIZE;
8158         }
8159
8160         if (tnapi->rx_rcb) {
8161                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8162                                (tp->rx_ret_ring_mask + 1) <<
8163                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8164                 rxrcb += TG3_BDINFO_SIZE;
8165         }
8166
8167         stblk = HOSTCC_STATBLCK_RING1;
8168
8169         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8170                 u64 mapping = (u64)tnapi->status_mapping;
8171                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8172                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8173
8174                 /* Clear status block in ram. */
8175                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8176
8177                 if (tnapi->tx_ring) {
8178                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8179                                        (TG3_TX_RING_SIZE <<
8180                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8181                                        NIC_SRAM_TX_BUFFER_DESC);
8182                         txrcb += TG3_BDINFO_SIZE;
8183                 }
8184
8185                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8186                                ((tp->rx_ret_ring_mask + 1) <<
8187                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8188
8189                 stblk += 8;
8190                 rxrcb += TG3_BDINFO_SIZE;
8191         }
8192 }
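
/* The status block address registers for the extra vectors are pairs of
 * 32-bit words (high word at +0, low word at +4) packed at an 8-byte
 * stride, which is why the loop above advances stblk by 8 per vector.
 */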
8193
8194 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8195 {
8196         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8197
8198         if (!tg3_flag(tp, 5750_PLUS) ||
8199             tg3_flag(tp, 5780_CLASS) ||
8200             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8201             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8202                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8203         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8204                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8205                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8206         else
8207                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8208
8209         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8210         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8211
8212         val = min(nic_rep_thresh, host_rep_thresh);
8213         tw32(RCVBDI_STD_THRESH, val);
8214
8215         if (tg3_flag(tp, 57765_PLUS))
8216                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8217
8218         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8219                 return;
8220
8221         if (!tg3_flag(tp, 5705_PLUS))
8222                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8223         else
8224                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8225
8226         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8227
8228         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8229         tw32(RCVBDI_JUMBO_THRESH, val);
8230
8231         if (tg3_flag(tp, 57765_PLUS))
8232                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8233 }
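
/* Worked example with made-up numbers: with a 32-entry BD cache and
 * rx_pending == 200, nic_rep_thresh = min(16, rx_std_max_post) and
 * host_rep_thresh = max(200 / 8, 1) = 25; the programmed threshold is
 * the smaller of the two, so replenishment kicks in for whichever side
 * runs low first.
 */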
8234
8235 /* tp->lock is held. */
8236 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8237 {
8238         u32 val, rdmac_mode;
8239         int i, err, limit;
8240         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8241
8242         tg3_disable_ints(tp);
8243
8244         tg3_stop_fw(tp);
8245
8246         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8247
8248         if (tg3_flag(tp, INIT_COMPLETE))
8249                 tg3_abort_hw(tp, 1);
8250
8251         /* Enable MAC control of LPI */
8252         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8253                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8254                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8255                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8256
8257                 tw32_f(TG3_CPMU_EEE_CTRL,
8258                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8259
8260                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8261                       TG3_CPMU_EEEMD_LPI_IN_TX |
8262                       TG3_CPMU_EEEMD_LPI_IN_RX |
8263                       TG3_CPMU_EEEMD_EEE_ENABLE;
8264
8265                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8266                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8267
8268                 if (tg3_flag(tp, ENABLE_APE))
8269                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8270
8271                 tw32_f(TG3_CPMU_EEE_MODE, val);
8272
8273                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8274                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8275                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8276
8277                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8278                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8279                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8280         }
8281
8282         if (reset_phy)
8283                 tg3_phy_reset(tp);
8284
8285         err = tg3_chip_reset(tp);
8286         if (err)
8287                 return err;
8288
8289         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8290
8291         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8292                 val = tr32(TG3_CPMU_CTRL);
8293                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8294                 tw32(TG3_CPMU_CTRL, val);
8295
8296                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8297                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8298                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8299                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8300
8301                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8302                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8303                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8304                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8305
8306                 val = tr32(TG3_CPMU_HST_ACC);
8307                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8308                 val |= CPMU_HST_ACC_MACCLK_6_25;
8309                 tw32(TG3_CPMU_HST_ACC, val);
8310         }
8311
8312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8313                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8314                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8315                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8316                 tw32(PCIE_PWR_MGMT_THRESH, val);
8317
8318                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8319                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8320
8321                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8322
8323                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8324                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8325         }
8326
8327         if (tg3_flag(tp, L1PLLPD_EN)) {
8328                 u32 grc_mode = tr32(GRC_MODE);
8329
8330                 /* Access the lower 1K of PL PCIE block registers. */
8331                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8332                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8333
8334                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8335                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8336                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8337
8338                 tw32(GRC_MODE, grc_mode);
8339         }
8340
8341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8342                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8343                         u32 grc_mode = tr32(GRC_MODE);
8344
8345                         /* Access the lower 1K of PL PCIE block registers. */
8346                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8347                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8348
8349                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8350                                    TG3_PCIE_PL_LO_PHYCTL5);
8351                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8352                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8353
8354                         tw32(GRC_MODE, grc_mode);
8355                 }
8356
8357                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8358                         u32 grc_mode = tr32(GRC_MODE);
8359
8360                         /* Access the lower 1K of DL PCIE block registers. */
8361                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8362                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8363
8364                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8365                                    TG3_PCIE_DL_LO_FTSMAX);
8366                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8367                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8368                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8369
8370                         tw32(GRC_MODE, grc_mode);
8371                 }
8372
8373                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8374                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8375                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8376                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8377         }
8378
8379         /* This works around an issue with Athlon chipsets on
8380          * B3 tigon3 silicon.  This bit has no effect on any
8381          * other revision.  But do not set this on PCI Express
8382          * chips and don't even touch the clocks if the CPMU is present.
8383          */
8384         if (!tg3_flag(tp, CPMU_PRESENT)) {
8385                 if (!tg3_flag(tp, PCI_EXPRESS))
8386                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8387                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8388         }
8389
8390         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8391             tg3_flag(tp, PCIX_MODE)) {
8392                 val = tr32(TG3PCI_PCISTATE);
8393                 val |= PCISTATE_RETRY_SAME_DMA;
8394                 tw32(TG3PCI_PCISTATE, val);
8395         }
8396
8397         if (tg3_flag(tp, ENABLE_APE)) {
8398                 /* Allow reads and writes to the
8399                  * APE register and memory space.
8400                  */
8401                 val = tr32(TG3PCI_PCISTATE);
8402                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8403                        PCISTATE_ALLOW_APE_SHMEM_WR |
8404                        PCISTATE_ALLOW_APE_PSPACE_WR;
8405                 tw32(TG3PCI_PCISTATE, val);
8406         }
8407
8408         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8409                 /* Enable some hw fixes.  */
8410                 val = tr32(TG3PCI_MSI_DATA);
8411                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8412                 tw32(TG3PCI_MSI_DATA, val);
8413         }
8414
8415         /* Descriptor ring init may make accesses to the
8416          * NIC SRAM area to set up the TX descriptors, so we
8417          * can only do this after the hardware has been
8418          * successfully reset.
8419          */
8420         err = tg3_init_rings(tp);
8421         if (err)
8422                 return err;
8423
8424         if (tg3_flag(tp, 57765_PLUS)) {
8425                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8426                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8427                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8428                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8429                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8430                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8431                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8432                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8433         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8434                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8435         /* This value is determined during the probe-time DMA
8436                  * engine test, tg3_test_dma.
8437                  */
8438                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8439         }
8440
8441         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8442                           GRC_MODE_4X_NIC_SEND_RINGS |
8443                           GRC_MODE_NO_TX_PHDR_CSUM |
8444                           GRC_MODE_NO_RX_PHDR_CSUM);
8445         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8446
8447         /* Pseudo-header checksum is done by hardware logic and not
8448          * the offload processors, so make the chip do the pseudo-
8449          * header checksums on receive.  For transmit it is more
8450          * convenient to do the pseudo-header checksum in software
8451          * as Linux does that on transmit for us in all cases.
8452          */
8453         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8454
8455         tw32(GRC_MODE,
8456              tp->grc_mode |
8457              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8458
8459         /* Set up the timer prescaler register.  Clock is always 66MHz. */
8460         val = tr32(GRC_MISC_CFG);
8461         val &= ~0xff;
8462         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8463         tw32(GRC_MISC_CFG, val);
8464
8465         /* Initialize MBUF/DESC pool. */
8466         if (tg3_flag(tp, 5750_PLUS)) {
8467                 /* Do nothing.  */
8468         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8469                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8471                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8472                 else
8473                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8474                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8475                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8476         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8477                 int fw_len;
8478
8479                 fw_len = tp->fw_len;
8480                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8481                 tw32(BUFMGR_MB_POOL_ADDR,
8482                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8483                 tw32(BUFMGR_MB_POOL_SIZE,
8484                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8485         }
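             /* A note on the 5705 TSO carve-out above: the TSO firmware
              * image sits at the bottom of the SRAM region that would
              * otherwise hold the mbuf pool, so the pool base is pushed
              * up by the firmware length, rounded to a 128-byte boundary
              * via (fw_len + 0x7f) & ~0x7f, and the pool shrinks by that
              * amount plus a further 0xa00 bytes (reserve not documented
              * here).
              */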
8486
8487         if (tp->dev->mtu <= ETH_DATA_LEN) {
8488                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8489                      tp->bufmgr_config.mbuf_read_dma_low_water);
8490                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8491                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8492                 tw32(BUFMGR_MB_HIGH_WATER,
8493                      tp->bufmgr_config.mbuf_high_water);
8494         } else {
8495                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8496                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8497                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8498                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8499                 tw32(BUFMGR_MB_HIGH_WATER,
8500                      tp->bufmgr_config.mbuf_high_water_jumbo);
8501         }
8502         tw32(BUFMGR_DMA_LOW_WATER,
8503              tp->bufmgr_config.dma_low_water);
8504         tw32(BUFMGR_DMA_HIGH_WATER,
8505              tp->bufmgr_config.dma_high_water);
8506
8507         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8509                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8511             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8512             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8513                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8514         tw32(BUFMGR_MODE, val);
8515         for (i = 0; i < 2000; i++) {
8516                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8517                         break;
8518                 udelay(10);
8519         }
8520         if (i >= 2000) {
8521                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8522                 return -ENODEV;
8523         }
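             /* The loop above polls for up to 2000 * 10us (~20ms) for
              * the buffer manager to report itself enabled before giving
              * up.
              */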
8524
8525         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8526                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8527
8528         tg3_setup_rxbd_thresholds(tp);
8529
8530         /* Initialize TG3_BDINFO's at:
8531          *  RCVDBDI_STD_BD:     standard eth size rx ring
8532          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8533          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8534          *
8535          * like so:
8536          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8537          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8538          *                              ring attribute flags
8539          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8540          *
8541          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8542          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8543          *
8544          * The size of each ring is fixed in the firmware, but the location is
8545          * configurable.
8546          */
8547         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8548              ((u64) tpr->rx_std_mapping >> 32));
8549         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8550              ((u64) tpr->rx_std_mapping & 0xffffffff));
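             /* The ring's 64-bit DMA address is programmed as two 32-bit
              * halves (TG3_64BIT_REG_HIGH/_LOW), as are the other host
              * addresses written below.
              */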
8551         if (!tg3_flag(tp, 5717_PLUS))
8552                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8553                      NIC_SRAM_RX_BUFFER_DESC);
8554
8555         /* Disable the mini ring */
8556         if (!tg3_flag(tp, 5705_PLUS))
8557                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8558                      BDINFO_FLAGS_DISABLED);
8559
8560         /* Program the jumbo buffer descriptor ring control
8561          * blocks on those devices that have them.
8562          */
8563         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8564             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8565
8566                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8567                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8568                              ((u64) tpr->rx_jmb_mapping >> 32));
8569                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8570                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8571                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8572                               BDINFO_FLAGS_MAXLEN_SHIFT;
8573                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8574                              val | BDINFO_FLAGS_USE_EXT_RECV);
8575                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8576                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8577                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8578                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8579                 } else {
8580                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8581                              BDINFO_FLAGS_DISABLED);
8582                 }
8583
8584                 if (tg3_flag(tp, 57765_PLUS)) {
8585                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8586                                 val = TG3_RX_STD_MAX_SIZE_5700;
8587                         else
8588                                 val = TG3_RX_STD_MAX_SIZE_5717;
8589                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8590                         val |= (TG3_RX_STD_DMA_SZ << 2);
8591                 } else
8592                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8593         } else
8594                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8595
8596         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8597
8598         tpr->rx_std_prod_idx = tp->rx_pending;
8599         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8600
8601         tpr->rx_jmb_prod_idx =
8602                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8603         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8604
8605         tg3_rings_reset(tp);
8606
8607         /* Initialize MAC address and backoff seed. */
8608         __tg3_set_mac_addr(tp, 0);
8609
8610         /* MTU + ethernet header + FCS + optional VLAN tag */
8611         tw32(MAC_RX_MTU_SIZE,
8612              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8613
8614         /* The slot time is changed by tg3_setup_phy if we
8615          * run at gigabit with half duplex.
8616          */
8617         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8618               (6 << TX_LENGTHS_IPG_SHIFT) |
8619               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8620
8621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8622                 val |= tr32(MAC_TX_LENGTHS) &
8623                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8624                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8625
8626         tw32(MAC_TX_LENGTHS, val);
8627
8628         /* Receive rules. */
8629         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8630         tw32(RCVLPC_CONFIG, 0x0181);
8631
8632         /* Calculate the RDMAC_MODE setting early; we need it to determine
8633          * the RCVLPC_STATE_ENABLE mask.
8634          */
8635         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8636                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8637                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8638                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8639                       RDMAC_MODE_LNGREAD_ENAB);
8640
8641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8642                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8643
8644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8645             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8646             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8647                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8648                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8649                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8650
8651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8652             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8653                 if (tg3_flag(tp, TSO_CAPABLE) &&
8654                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8655                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8656                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8657                            !tg3_flag(tp, IS_5788)) {
8658                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8659                 }
8660         }
8661
8662         if (tg3_flag(tp, PCI_EXPRESS))
8663                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8664
8665         if (tg3_flag(tp, HW_TSO_1) ||
8666             tg3_flag(tp, HW_TSO_2) ||
8667             tg3_flag(tp, HW_TSO_3))
8668                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8669
8670         if (tg3_flag(tp, 57765_PLUS) ||
8671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8673                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8674
8675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8676                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8677
8678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8682             tg3_flag(tp, 57765_PLUS)) {
8683                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8684                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8685                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8686                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8687                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8688                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8689                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8690                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8691                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8692                 }
8693                 tw32(TG3_RDMA_RSRVCTRL_REG,
8694                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8695         }
8696
8697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8698             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8699                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8700                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8701                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8702                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8703         }
8704
8705         /* Receive/send statistics. */
8706         if (tg3_flag(tp, 5750_PLUS)) {
8707                 val = tr32(RCVLPC_STATS_ENABLE);
8708                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8709                 tw32(RCVLPC_STATS_ENABLE, val);
8710         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8711                    tg3_flag(tp, TSO_CAPABLE)) {
8712                 val = tr32(RCVLPC_STATS_ENABLE);
8713                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8714                 tw32(RCVLPC_STATS_ENABLE, val);
8715         } else {
8716                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8717         }
8718         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8719         tw32(SNDDATAI_STATSENAB, 0xffffff);
8720         tw32(SNDDATAI_STATSCTRL,
8721              (SNDDATAI_SCTRL_ENABLE |
8722               SNDDATAI_SCTRL_FASTUPD));
8723
8724         /* Set up the host coalescing engine. */
8725         tw32(HOSTCC_MODE, 0);
8726         for (i = 0; i < 2000; i++) {
8727                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8728                         break;
8729                 udelay(10);
8730         }
8731
8732         __tg3_set_coalesce(tp, &tp->coal);
8733
8734         if (!tg3_flag(tp, 5705_PLUS)) {
8735                 /* Status/statistics block address.  See tg3_timer,
8736                  * the tg3_periodic_fetch_stats call there, and
8737                  * tg3_get_stats to see how this works for 5705/5750 chips.
8738                  */
8739                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8740                      ((u64) tp->stats_mapping >> 32));
8741                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8742                      ((u64) tp->stats_mapping & 0xffffffff));
8743                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8744
8745                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8746
8747                 /* Clear statistics and status block memory areas */
8748                 for (i = NIC_SRAM_STATS_BLK;
8749                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8750                      i += sizeof(u32)) {
8751                         tg3_write_mem(tp, i, 0);
8752                         udelay(40);
8753                 }
8754         }
8755
8756         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8757
8758         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8759         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8760         if (!tg3_flag(tp, 5705_PLUS))
8761                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8762
8763         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8764                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8765                 /* reset to prevent losing 1st rx packet intermittently */
8766                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8767                 udelay(10);
8768         }
8769
8770         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8771                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8772                         MAC_MODE_FHDE_ENABLE;
8773         if (tg3_flag(tp, ENABLE_APE))
8774                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8775         if (!tg3_flag(tp, 5705_PLUS) &&
8776             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8777             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8778                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8779         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8780         udelay(40);
8781
8782         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8783          * If TG3_FLAG_IS_NIC is zero, we should read the
8784          * register to preserve the GPIO settings for LOMs. The GPIOs,
8785          * whether used as inputs or outputs, are set by boot code after
8786          * reset.
8787          */
8788         if (!tg3_flag(tp, IS_NIC)) {
8789                 u32 gpio_mask;
8790
8791                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8792                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8793                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8794
8795                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8796                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8797                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8798
8799                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8800                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8801
8802                 tp->grc_local_ctrl &= ~gpio_mask;
8803                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8804
8805                 /* GPIO1 must be driven high for eeprom write protect */
8806                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8807                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8808                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8809         }
8810         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8811         udelay(100);
8812
8813         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8814                 val = tr32(MSGINT_MODE);
8815                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8816                 if (!tg3_flag(tp, 1SHOT_MSI))
8817                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8818                 tw32(MSGINT_MODE, val);
8819         }
8820
8821         if (!tg3_flag(tp, 5705_PLUS)) {
8822                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8823                 udelay(40);
8824         }
8825
8826         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8827                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8828                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8829                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8830                WDMAC_MODE_LNGREAD_ENAB);
8831
8832         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8833             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8834                 if (tg3_flag(tp, TSO_CAPABLE) &&
8835                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8836                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8837                         /* nothing */
8838                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8839                            !tg3_flag(tp, IS_5788)) {
8840                         val |= WDMAC_MODE_RX_ACCEL;
8841                 }
8842         }
8843
8844         /* Enable host coalescing bug fix */
8845         if (tg3_flag(tp, 5755_PLUS))
8846                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8847
8848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8849                 val |= WDMAC_MODE_BURST_ALL_DATA;
8850
8851         tw32_f(WDMAC_MODE, val);
8852         udelay(40);
8853
8854         if (tg3_flag(tp, PCIX_MODE)) {
8855                 u16 pcix_cmd;
8856
8857                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8858                                      &pcix_cmd);
8859                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8860                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8861                         pcix_cmd |= PCI_X_CMD_READ_2K;
8862                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8863                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8864                         pcix_cmd |= PCI_X_CMD_READ_2K;
8865                 }
8866                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8867                                       pcix_cmd);
8868         }
8869
8870         tw32_f(RDMAC_MODE, rdmac_mode);
8871         udelay(40);
8872
8873         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8874         if (!tg3_flag(tp, 5705_PLUS))
8875                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8876
8877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8878                 tw32(SNDDATAC_MODE,
8879                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8880         else
8881                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8882
8883         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8884         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8885         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8886         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8887                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8888         tw32(RCVDBDI_MODE, val);
8889         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8890         if (tg3_flag(tp, HW_TSO_1) ||
8891             tg3_flag(tp, HW_TSO_2) ||
8892             tg3_flag(tp, HW_TSO_3))
8893                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8894         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8895         if (tg3_flag(tp, ENABLE_TSS))
8896                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8897         tw32(SNDBDI_MODE, val);
8898         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8899
8900         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8901                 err = tg3_load_5701_a0_firmware_fix(tp);
8902                 if (err)
8903                         return err;
8904         }
8905
8906         if (tg3_flag(tp, TSO_CAPABLE)) {
8907                 err = tg3_load_tso_firmware(tp);
8908                 if (err)
8909                         return err;
8910         }
8911
8912         tp->tx_mode = TX_MODE_ENABLE;
8913
8914         if (tg3_flag(tp, 5755_PLUS) ||
8915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8916                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8917
8918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8919                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8920                 tp->tx_mode &= ~val;
8921                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8922         }
8923
8924         tw32_f(MAC_TX_MODE, tp->tx_mode);
8925         udelay(100);
8926
8927         if (tg3_flag(tp, ENABLE_RSS)) {
8928                 int i = 0;
8929                 u32 reg = MAC_RSS_INDIR_TBL_0;
8930
8931                 if (tp->irq_cnt == 2) {
8932                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8933                                 tw32(reg, 0x0);
8934                                 reg += 4;
8935                         }
8936                 } else {
8937                         u32 val;
8938
8939                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8940                                 val = i % (tp->irq_cnt - 1);
8941                                 i++;
8942                                 for (; i % 8; i++) {
8943                                         val <<= 4;
8944                                         val |= (i % (tp->irq_cnt - 1));
8945                                 }
8946                                 tw32(reg, val);
8947                                 reg += 4;
8948                         }
8949                 }
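                     /* Each 32-bit indirection register above packs eight
                      * 4-bit entries (hence the val <<= 4 shifting), each
                      * naming an rx ring in 0..(irq_cnt - 2); vector 0
                      * carries no rx ring.  With only two vectors there
                      * is a single rx ring, so the table is simply
                      * zeroed.  The ten MAC_RSS_HASH_KEY registers
                      * written below supply the 40-byte RSS hash key.
                      */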
8950
8951                 /* Set up the "secret" hash key. */
8952                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8953                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8954                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8955                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8956                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8957                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8958                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8959                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8960                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8961                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8962         }
8963
8964         tp->rx_mode = RX_MODE_ENABLE;
8965         if (tg3_flag(tp, 5755_PLUS))
8966                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8967
8968         if (tg3_flag(tp, ENABLE_RSS))
8969                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8970                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8971                                RX_MODE_RSS_IPV6_HASH_EN |
8972                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8973                                RX_MODE_RSS_IPV4_HASH_EN |
8974                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8975
8976         tw32_f(MAC_RX_MODE, tp->rx_mode);
8977         udelay(10);
8978
8979         tw32(MAC_LED_CTRL, tp->led_ctrl);
8980
8981         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8982         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8983                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8984                 udelay(10);
8985         }
8986         tw32_f(MAC_RX_MODE, tp->rx_mode);
8987         udelay(10);
8988
8989         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8990                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8991                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8992                         /* Set drive transmission level to 1.2V  */
8993                         /* only if the signal pre-emphasis bit is not set  */
8994                         val = tr32(MAC_SERDES_CFG);
8995                         val &= 0xfffff000;
8996                         val |= 0x880;
8997                         tw32(MAC_SERDES_CFG, val);
8998                 }
8999                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9000                         tw32(MAC_SERDES_CFG, 0x616000);
9001         }
9002
9003         /* Prevent chip from dropping frames when flow control
9004          * is enabled.
9005          */
9006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9007                 val = 1;
9008         else
9009                 val = 2;
9010         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9011
9012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9013             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9014                 /* Use hardware link auto-negotiation */
9015                 tg3_flag_set(tp, HW_AUTONEG);
9016         }
9017
9018         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9020                 u32 tmp;
9021
9022                 tmp = tr32(SERDES_RX_CTRL);
9023                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9024                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9025                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9026                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9027         }
9028
9029         if (!tg3_flag(tp, USE_PHYLIB)) {
9030                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9031                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9032                         tp->link_config.speed = tp->link_config.orig_speed;
9033                         tp->link_config.duplex = tp->link_config.orig_duplex;
9034                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9035                 }
9036
9037                 err = tg3_setup_phy(tp, 0);
9038                 if (err)
9039                         return err;
9040
9041                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9042                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9043                         u32 tmp;
9044
9045                         /* Clear CRC stats. */
9046                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9047                                 tg3_writephy(tp, MII_TG3_TEST1,
9048                                              tmp | MII_TG3_TEST1_CRC_EN);
9049                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9050                         }
9051                 }
9052         }
9053
9054         __tg3_set_rx_mode(tp->dev);
9055
9056         /* Initialize receive rules. */
9057         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9058         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9059         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9060         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9061
9062         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9063                 limit = 8;
9064         else
9065                 limit = 16;
9066         if (tg3_flag(tp, ENABLE_ASF))
9067                 limit -= 4;
9068         switch (limit) {
9069         case 16:
9070                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9071         case 15:
9072                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9073         case 14:
9074                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9075         case 13:
9076                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9077         case 12:
9078                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9079         case 11:
9080                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9081         case 10:
9082                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9083         case 9:
9084                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9085         case 8:
9086                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9087         case 7:
9088                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9089         case 6:
9090                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9091         case 5:
9092                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9093         case 4:
9094                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9095         case 3:
9096                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9097         case 2:
9098         case 1:
9099
9100         default:
9101                 break;
9102         }
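             /* The switch above relies on intentional fall-through: it
              * enters at the case matching the number of usable rules
              * and clears every unused rule/value pair from (limit - 1)
              * down to 4.  Rules 0 and 1 were programmed above; the
              * writes for rules 2 and 3 are deliberately commented out,
              * presumably reserved.
              */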
9103
9104         if (tg3_flag(tp, ENABLE_APE))
9105                 /* Write our heartbeat update interval to APE. */
9106                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9107                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9108
9109         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9110
9111         return 0;
9112 }
9113
9114 /* Called at device open time to get the chip ready for
9115  * packet processing.  Invoked with tp->lock held.
9116  */
9117 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9118 {
9119         tg3_switch_clocks(tp);
9120
9121         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9122
9123         return tg3_reset_hw(tp, reset_phy);
9124 }
9125
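     /* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
      * software counter.  If the low word ends up smaller than the value
      * just added, the 32-bit addition wrapped, so a carry is propagated
      * into the high word.
      */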
9126 #define TG3_STAT_ADD32(PSTAT, REG) \
9127 do {    u32 __val = tr32(REG); \
9128         (PSTAT)->low += __val; \
9129         if ((PSTAT)->low < __val) \
9130                 (PSTAT)->high += 1; \
9131 } while (0)
9132
9133 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9134 {
9135         struct tg3_hw_stats *sp = tp->hw_stats;
9136
9137         if (!netif_carrier_ok(tp->dev))
9138                 return;
9139
9140         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9141         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9142         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9143         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9144         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9145         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9146         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9147         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9148         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9149         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9150         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9151         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9152         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9153
9154         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9155         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9156         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9157         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9158         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9159         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9160         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9161         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9162         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9163         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9164         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9165         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9166         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9167         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9168
9169         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9170         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9171             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9172             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9173                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9174         } else {
9175                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9176                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9177                 if (val) {
9178                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9179                         sp->rx_discards.low += val;
9180                         if (sp->rx_discards.low < val)
9181                                 sp->rx_discards.high += 1;
9182                 }
9183                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9184         }
9185         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9186 }
9187
9188 static void tg3_chk_missed_msi(struct tg3 *tp)
9189 {
9190         u32 i;
9191
9192         for (i = 0; i < tp->irq_cnt; i++) {
9193                 struct tg3_napi *tnapi = &tp->napi[i];
9194
9195                 if (tg3_has_work(tnapi)) {
9196                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9197                             tnapi->last_tx_cons == tnapi->tx_cons) {
9198                                 if (tnapi->chk_msi_cnt < 1) {
9199                                         tnapi->chk_msi_cnt++;
9200                                         return;
9201                                 }
9202                                 tg3_msi(0, tnapi);
9203                         }
9204                 }
9205                 tnapi->chk_msi_cnt = 0;
9206                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9207                 tnapi->last_tx_cons = tnapi->tx_cons;
9208         }
9209 }
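     /* The scan above is a watchdog for lost MSIs: if a vector still has
      * work pending but its rx/tx consumer indices have not moved since
      * the previous scan, the interrupt is assumed lost and tg3_msi() is
      * invoked directly.  chk_msi_cnt grants one timer tick of grace
      * before firing.
      */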
9210
9211 static void tg3_timer(unsigned long __opaque)
9212 {
9213         struct tg3 *tp = (struct tg3 *) __opaque;
9214
9215         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9216                 goto restart_timer;
9217
9218         spin_lock(&tp->lock);
9219
9220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9221             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9222                 tg3_chk_missed_msi(tp);
9223
9224         if (!tg3_flag(tp, TAGGED_STATUS)) {
9225                 /* All of this garbage is because, when using non-tagged
9226                  * IRQ status, the mailbox/status_block protocol the chip
9227                  * uses with the cpu is race prone.
9228                  */
9229                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9230                         tw32(GRC_LOCAL_CTRL,
9231                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9232                 } else {
9233                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9234                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9235                 }
9236
9237                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9238                         spin_unlock(&tp->lock);
9239                         tg3_reset_task_schedule(tp);
9240                         goto restart_timer;
9241                 }
9242         }
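             /* In the non-tagged path above: if the status block shows
              * an update the CPU may have missed, GRC_LCLCTRL_SETINT
              * re-asserts the interrupt; otherwise HOSTCC_MODE_NOW
              * forces the coalescing engine to DMA a fresh status block.
              * A write DMA engine that has dropped its enable bit
              * indicates a hung chip, so the reset task is scheduled.
              */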
9243
9244         /* This part only runs once per second. */
9245         if (!--tp->timer_counter) {
9246                 if (tg3_flag(tp, 5705_PLUS))
9247                         tg3_periodic_fetch_stats(tp);
9248
9249                 if (tp->setlpicnt && !--tp->setlpicnt)
9250                         tg3_phy_eee_enable(tp);
9251
9252                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9253                         u32 mac_stat;
9254                         int phy_event;
9255
9256                         mac_stat = tr32(MAC_STATUS);
9257
9258                         phy_event = 0;
9259                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9260                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9261                                         phy_event = 1;
9262                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9263                                 phy_event = 1;
9264
9265                         if (phy_event)
9266                                 tg3_setup_phy(tp, 0);
9267                 } else if (tg3_flag(tp, POLL_SERDES)) {
9268                         u32 mac_stat = tr32(MAC_STATUS);
9269                         int need_setup = 0;
9270
9271                         if (netif_carrier_ok(tp->dev) &&
9272                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9273                                 need_setup = 1;
9274                         }
9275                         if (!netif_carrier_ok(tp->dev) &&
9276                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9277                                          MAC_STATUS_SIGNAL_DET))) {
9278                                 need_setup = 1;
9279                         }
9280                         if (need_setup) {
9281                                 if (!tp->serdes_counter) {
9282                                         tw32_f(MAC_MODE,
9283                                              (tp->mac_mode &
9284                                               ~MAC_MODE_PORT_MODE_MASK));
9285                                         udelay(40);
9286                                         tw32_f(MAC_MODE, tp->mac_mode);
9287                                         udelay(40);
9288                                 }
9289                                 tg3_setup_phy(tp, 0);
9290                         }
9291                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9292                            tg3_flag(tp, 5780_CLASS)) {
9293                         tg3_serdes_parallel_detect(tp);
9294                 }
9295
9296                 tp->timer_counter = tp->timer_multiplier;
9297         }
9298
9299         /* Heartbeat is only sent once every 2 seconds.
9300          *
9301          * The heartbeat is to tell the ASF firmware that the host
9302          * driver is still alive.  In the event that the OS crashes,
9303          * ASF needs to reset the hardware to free up the FIFO space
9304          * that may be filled with rx packets destined for the host.
9305          * If the FIFO is full, ASF will no longer function properly.
9306          *
9307          * Unintended resets have been reported on real-time kernels
9308          * where the timer doesn't run on time.  Netpoll will also have
9309          * the same problem.
9310          *
9311          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9312          * to check the ring condition when the heartbeat is expiring
9313          * before doing the reset.  This will prevent most unintended
9314          * resets.
9315          */
9316         if (!--tp->asf_counter) {
9317                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9318                         tg3_wait_for_event_ack(tp);
9319
9320                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9321                                       FWCMD_NICDRV_ALIVE3);
9322                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9323                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9324                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9325
9326                         tg3_generate_fw_event(tp);
9327                 }
9328                 tp->asf_counter = tp->asf_multiplier;
9329         }
9330
9331         spin_unlock(&tp->lock);
9332
9333 restart_timer:
9334         tp->timer.expires = jiffies + tp->timer_offset;
9335         add_timer(&tp->timer);
9336 }
9337
9338 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9339 {
9340         irq_handler_t fn;
9341         unsigned long flags;
9342         char *name;
9343         struct tg3_napi *tnapi = &tp->napi[irq_num];
9344
9345         if (tp->irq_cnt == 1)
9346                 name = tp->dev->name;
9347         else {
9348                 name = &tnapi->irq_lbl[0];
9349                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9350                 name[IFNAMSIZ-1] = 0;
9351         }
9352
9353         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9354                 fn = tg3_msi;
9355                 if (tg3_flag(tp, 1SHOT_MSI))
9356                         fn = tg3_msi_1shot;
9357                 flags = 0;
9358         } else {
9359                 fn = tg3_interrupt;
9360                 if (tg3_flag(tp, TAGGED_STATUS))
9361                         fn = tg3_interrupt_tagged;
9362                 flags = IRQF_SHARED;
9363         }
9364
9365         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9366 }
9367
9368 static int tg3_test_interrupt(struct tg3 *tp)
9369 {
9370         struct tg3_napi *tnapi = &tp->napi[0];
9371         struct net_device *dev = tp->dev;
9372         int err, i, intr_ok = 0;
9373         u32 val;
9374
9375         if (!netif_running(dev))
9376                 return -ENODEV;
9377
9378         tg3_disable_ints(tp);
9379
9380         free_irq(tnapi->irq_vec, tnapi);
9381
9382         /*
9383          * Turn off MSI one shot mode.  Otherwise this test has no
9384          * observable way to know whether the interrupt was delivered.
9385          */
9386         if (tg3_flag(tp, 57765_PLUS)) {
9387                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9388                 tw32(MSGINT_MODE, val);
9389         }
9390
9391         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9392                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9393         if (err)
9394                 return err;
9395
9396         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9397         tg3_enable_ints(tp);
9398
9399         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9400                tnapi->coal_now);
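             /* The write above kicks the coalescing engine with this
              * vector's "now" bit so that it raises an interrupt
              * immediately; the loop below then polls for up to ~50ms
              * (5 x 10ms) for evidence of delivery: a non-zero interrupt
              * mailbox or the MISC_HOST_CTRL_MASK_PCI_INT bit, either of
              * which is taken as proof that the interrupt fired.
              */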
9401
9402         for (i = 0; i < 5; i++) {
9403                 u32 int_mbox, misc_host_ctrl;
9404
9405                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9406                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9407
9408                 if ((int_mbox != 0) ||
9409                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9410                         intr_ok = 1;
9411                         break;
9412                 }
9413
9414                 if (tg3_flag(tp, 57765_PLUS) &&
9415                     tnapi->hw_status->status_tag != tnapi->last_tag)
9416                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9417
9418                 msleep(10);
9419         }
9420
9421         tg3_disable_ints(tp);
9422
9423         free_irq(tnapi->irq_vec, tnapi);
9424
9425         err = tg3_request_irq(tp, 0);
9426
9427         if (err)
9428                 return err;
9429
9430         if (intr_ok) {
9431                 /* Reenable MSI one shot mode. */
9432                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9433                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9434                         tw32(MSGINT_MODE, val);
9435                 }
9436                 return 0;
9437         }
9438
9439         return -EIO;
9440 }
9441
9442 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9443  * is successfully restored.
9444  */
9445 static int tg3_test_msi(struct tg3 *tp)
9446 {
9447         int err;
9448         u16 pci_cmd;
9449
9450         if (!tg3_flag(tp, USING_MSI))
9451                 return 0;
9452
9453         /* Turn off SERR reporting in case MSI terminates with Master
9454          * Abort.
9455          */
9456         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9457         pci_write_config_word(tp->pdev, PCI_COMMAND,
9458                               pci_cmd & ~PCI_COMMAND_SERR);
9459
9460         err = tg3_test_interrupt(tp);
9461
9462         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9463
9464         if (!err)
9465                 return 0;
9466
9467         /* other failures */
9468         if (err != -EIO)
9469                 return err;
9470
9471         /* MSI test failed, go back to INTx mode */
9472         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9473                     "to INTx mode. Please report this failure to the PCI "
9474                     "maintainer and include system chipset information\n");
9475
9476         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9477
9478         pci_disable_msi(tp->pdev);
9479
9480         tg3_flag_clear(tp, USING_MSI);
9481         tp->napi[0].irq_vec = tp->pdev->irq;
9482
9483         err = tg3_request_irq(tp, 0);
9484         if (err)
9485                 return err;
9486
9487         /* Need to reset the chip because the MSI cycle may have terminated
9488          * with Master Abort.
9489          */
9490         tg3_full_lock(tp, 1);
9491
9492         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9493         err = tg3_init_hw(tp, 1);
9494
9495         tg3_full_unlock(tp);
9496
9497         if (err)
9498                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9499
9500         return err;
9501 }
9502
9503 static int tg3_request_firmware(struct tg3 *tp)
9504 {
9505         const __be32 *fw_data;
9506
9507         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9508                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9509                            tp->fw_needed);
9510                 return -ENOENT;
9511         }
9512
9513         fw_data = (void *)tp->fw->data;
9514
9515         /* Firmware blob starts with version numbers, followed by
9516          * start address and _full_ length including BSS sections
9517          * (which must be longer than the actual data, of course).
9518          */
9519
9520         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9521         if (tp->fw_len < (tp->fw->size - 12)) {
9522                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9523                            tp->fw_len, tp->fw_needed);
9524                 release_firmware(tp->fw);
9525                 tp->fw = NULL;
9526                 return -EINVAL;
9527         }
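             /* Assumed blob layout, as implied by the checks above:
              * three big-endian 32-bit words (version, start address,
              * full length including BSS) followed by the code/data
              * payload, hence the 12-byte header offset in the length
              * sanity check.
              */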
9528
9529         /* We no longer need firmware; we have it. */
9530         tp->fw_needed = NULL;
9531         return 0;
9532 }
9533
9534 static bool tg3_enable_msix(struct tg3 *tp)
9535 {
9536         int i, rc, cpus = num_online_cpus();
9537         struct msix_entry msix_ent[tp->irq_max];
9538
9539         if (cpus == 1)
9540                 /* Just fall back to the simpler MSI mode. */
9541                 return false;
9542
9543         /*
9544          * We want as many rx rings enabled as there are cpus.
9545          * The first MSI-X vector only deals with link interrupts, etc.,
9546          * so we add one to the number of vectors we are requesting.
9547          */
9548         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9549
9550         for (i = 0; i < tp->irq_max; i++) {
9551                 msix_ent[i].entry  = i;
9552                 msix_ent[i].vector = 0;
9553         }
9554
9555         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9556         if (rc < 0) {
9557                 return false;
9558         } else if (rc != 0) {
9559                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9560                         return false;
9561                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9562                               tp->irq_cnt, rc);
9563                 tp->irq_cnt = rc;
9564         }
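             /* Under the (old) pci_enable_msix() convention assumed
              * here, a negative return is a hard failure, zero is
              * success, and a positive return is the number of vectors
              * the platform could provide, so the request is retried
              * with exactly that many.
              */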
9565
9566         for (i = 0; i < tp->irq_max; i++)
9567                 tp->napi[i].irq_vec = msix_ent[i].vector;
9568
9569         netif_set_real_num_tx_queues(tp->dev, 1);
9570         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9571         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9572                 pci_disable_msix(tp->pdev);
9573                 return false;
9574         }
9575
9576         if (tp->irq_cnt > 1) {
9577                 tg3_flag_set(tp, ENABLE_RSS);
9578
9579                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9580                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9581                         tg3_flag_set(tp, ENABLE_TSS);
9582                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9583                 }
9584         }
9585
9586         return true;
9587 }
9588
9589 static void tg3_ints_init(struct tg3 *tp)
9590 {
9591         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9592             !tg3_flag(tp, TAGGED_STATUS)) {
9593                 /* All MSI-supporting chips should support tagged
9594                  * status.  Assert that this is the case.
9595                  */
9596                 netdev_warn(tp->dev,
9597                             "MSI without TAGGED_STATUS? Not using MSI\n");
9598                 goto defcfg;
9599         }
9600
9601         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9602                 tg3_flag_set(tp, USING_MSIX);
9603         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9604                 tg3_flag_set(tp, USING_MSI);
9605
9606         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9607                 u32 msi_mode = tr32(MSGINT_MODE);
9608                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9609                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9610                 if (!tg3_flag(tp, 1SHOT_MSI))
9611                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9612                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9613         }
9614 defcfg:
9615         if (!tg3_flag(tp, USING_MSIX)) {
9616                 tp->irq_cnt = 1;
9617                 tp->napi[0].irq_vec = tp->pdev->irq;
9618                 netif_set_real_num_tx_queues(tp->dev, 1);
9619                 netif_set_real_num_rx_queues(tp->dev, 1);
9620         }
9621 }
9622
9623 static void tg3_ints_fini(struct tg3 *tp)
9624 {
9625         if (tg3_flag(tp, USING_MSIX))
9626                 pci_disable_msix(tp->pdev);
9627         else if (tg3_flag(tp, USING_MSI))
9628                 pci_disable_msi(tp->pdev);
9629         tg3_flag_clear(tp, USING_MSI);
9630         tg3_flag_clear(tp, USING_MSIX);
9631         tg3_flag_clear(tp, ENABLE_RSS);
9632         tg3_flag_clear(tp, ENABLE_TSS);
9633 }
9634
9635 static int tg3_open(struct net_device *dev)
9636 {
9637         struct tg3 *tp = netdev_priv(dev);
9638         int i, err;
9639
9640         if (tp->fw_needed) {
9641                 err = tg3_request_firmware(tp);
9642                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9643                         if (err)
9644                                 return err;
9645                 } else if (err) {
9646                         netdev_warn(tp->dev, "TSO capability disabled\n");
9647                         tg3_flag_clear(tp, TSO_CAPABLE);
9648                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9649                         netdev_notice(tp->dev, "TSO capability restored\n");
9650                         tg3_flag_set(tp, TSO_CAPABLE);
9651                 }
9652         }
9653
9654         netif_carrier_off(tp->dev);
9655
9656         err = tg3_power_up(tp);
9657         if (err)
9658                 return err;
9659
9660         tg3_full_lock(tp, 0);
9661
9662         tg3_disable_ints(tp);
9663         tg3_flag_clear(tp, INIT_COMPLETE);
9664
9665         tg3_full_unlock(tp);
9666
9667         /*
9668          * Set up interrupts first so we know how
9669          * many NAPI resources to allocate.
9670          */
9671         tg3_ints_init(tp);
9672
9673         /* The placement of this call is tied
9674          * to the setup and use of Host TX descriptors.
9675          */
9676         err = tg3_alloc_consistent(tp);
9677         if (err)
9678                 goto err_out1;
9679
9680         tg3_napi_init(tp);
9681
9682         tg3_napi_enable(tp);
9683
9684         for (i = 0; i < tp->irq_cnt; i++) {
9685                 struct tg3_napi *tnapi = &tp->napi[i];
9686                 err = tg3_request_irq(tp, i);
9687                 if (err) {
9688                         for (i--; i >= 0; i--) {
9689                                 tnapi = &tp->napi[i];
9690                                 free_irq(tnapi->irq_vec, tnapi);
9691                         }
9692                         goto err_out2;
9693                 }
9694         }
9695
9696         tg3_full_lock(tp, 0);
9697
9698         err = tg3_init_hw(tp, 1);
9699         if (err) {
9700                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9701                 tg3_free_rings(tp);
9702         } else {
9703                 if (tg3_flag(tp, TAGGED_STATUS) &&
9704                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9705                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9706                         tp->timer_offset = HZ;
9707                 else
9708                         tp->timer_offset = HZ / 10;
9709
9710                 BUG_ON(tp->timer_offset > HZ);
9711                 tp->timer_counter = tp->timer_multiplier =
9712                         (HZ / tp->timer_offset);
9713                 tp->asf_counter = tp->asf_multiplier =
9714                         ((HZ / tp->timer_offset) * 2);
9715
9716                 init_timer(&tp->timer);
9717                 tp->timer.expires = jiffies + tp->timer_offset;
9718                 tp->timer.data = (unsigned long) tp;
9719                 tp->timer.function = tg3_timer;
9720         }
9721
9722         tg3_full_unlock(tp);
9723
9724         if (err)
9725                 goto err_out3;
9726
9727         if (tg3_flag(tp, USING_MSI)) {
9728                 err = tg3_test_msi(tp);
9729
9730                 if (err) {
9731                         tg3_full_lock(tp, 0);
9732                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9733                         tg3_free_rings(tp);
9734                         tg3_full_unlock(tp);
9735
9736                         goto err_out2;
9737                 }
9738
9739                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9740                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9741
9742                         tw32(PCIE_TRANSACTION_CFG,
9743                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9744                 }
9745         }
9746
9747         tg3_phy_start(tp);
9748
9749         tg3_full_lock(tp, 0);
9750
9751         add_timer(&tp->timer);
9752         tg3_flag_set(tp, INIT_COMPLETE);
9753         tg3_enable_ints(tp);
9754
9755         tg3_full_unlock(tp);
9756
9757         netif_tx_start_all_queues(dev);
9758
9759         /*
9760          * Reset the loopback feature if it was turned on while the
9761          * device was down; make sure that it is reinstalled properly now.
9762          */
9763         if (dev->features & NETIF_F_LOOPBACK)
9764                 tg3_set_loopback(dev, dev->features);
9765
9766         return 0;
9767
9768 err_out3:
9769         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9770                 struct tg3_napi *tnapi = &tp->napi[i];
9771                 free_irq(tnapi->irq_vec, tnapi);
9772         }
9773
9774 err_out2:
9775         tg3_napi_disable(tp);
9776         tg3_napi_fini(tp);
9777         tg3_free_consistent(tp);
9778
9779 err_out1:
9780         tg3_ints_fini(tp);
9781         tg3_frob_aux_power(tp, false);
9782         pci_set_power_state(tp->pdev, PCI_D3hot);
9783         return err;
9784 }
9785
9786 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9787                                                  struct rtnl_link_stats64 *);
9788 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9789
9790 static int tg3_close(struct net_device *dev)
9791 {
9792         int i;
9793         struct tg3 *tp = netdev_priv(dev);
9794
9795         tg3_napi_disable(tp);
9796         tg3_reset_task_cancel(tp);
9797
9798         netif_tx_stop_all_queues(dev);
9799
9800         del_timer_sync(&tp->timer);
9801
9802         tg3_phy_stop(tp);
9803
9804         tg3_full_lock(tp, 1);
9805
9806         tg3_disable_ints(tp);
9807
9808         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9809         tg3_free_rings(tp);
9810         tg3_flag_clear(tp, INIT_COMPLETE);
9811
9812         tg3_full_unlock(tp);
9813
9814         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9815                 struct tg3_napi *tnapi = &tp->napi[i];
9816                 free_irq(tnapi->irq_vec, tnapi);
9817         }
9818
9819         tg3_ints_fini(tp);
9820
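        /* Snapshot the counters before the hardware is released; the
         * _prev copies seed tg3_get_stats64()/tg3_get_estats() so the
         * totals stay monotonic across an ifdown/ifup cycle.
         */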
9821         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9822
9823         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9824                sizeof(tp->estats_prev));
9825
9826         tg3_napi_fini(tp);
9827
9828         tg3_free_consistent(tp);
9829
9830         tg3_power_down(tp);
9831
9832         netif_carrier_off(tp->dev);
9833
9834         return 0;
9835 }
9836
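/* Hardware statistics counters are kept as {high, low} 32-bit halves;
 * fold them into a single u64.
 */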
9837 static inline u64 get_stat64(tg3_stat64_t *val)
9838 {
9839         return ((u64)val->high << 32) | ((u64)val->low);
9840 }
9841
9842 static u64 calc_crc_errors(struct tg3 *tp)
9843 {
9844         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9845
9846         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9847             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9848              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9849                 u32 val;
9850
9851                 spin_lock_bh(&tp->lock);
9852                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9853                         tg3_writephy(tp, MII_TG3_TEST1,
9854                                      val | MII_TG3_TEST1_CRC_EN);
9855                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9856                 } else
9857                         val = 0;
9858                 spin_unlock_bh(&tp->lock);
9859
9860                 tp->phy_crc_errors += val;
9861
9862                 return tp->phy_crc_errors;
9863         }
9864
9865         return get_stat64(&hw_stats->rx_fcs_errors);
9866 }
9867
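/* Each ethtool stat is the snapshot saved at the last close plus the
 * live hardware counter (the hardware counters presumably restart after
 * a chip reset, which is why the snapshot is kept).
 */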
9868 #define ESTAT_ADD(member) \
9869         estats->member =        old_estats->member + \
9870                                 get_stat64(&hw_stats->member)
9871
9872 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9873 {
9874         struct tg3_ethtool_stats *estats = &tp->estats;
9875         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9876         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9877
9878         if (!hw_stats)
9879                 return old_estats;
9880
9881         ESTAT_ADD(rx_octets);
9882         ESTAT_ADD(rx_fragments);
9883         ESTAT_ADD(rx_ucast_packets);
9884         ESTAT_ADD(rx_mcast_packets);
9885         ESTAT_ADD(rx_bcast_packets);
9886         ESTAT_ADD(rx_fcs_errors);
9887         ESTAT_ADD(rx_align_errors);
9888         ESTAT_ADD(rx_xon_pause_rcvd);
9889         ESTAT_ADD(rx_xoff_pause_rcvd);
9890         ESTAT_ADD(rx_mac_ctrl_rcvd);
9891         ESTAT_ADD(rx_xoff_entered);
9892         ESTAT_ADD(rx_frame_too_long_errors);
9893         ESTAT_ADD(rx_jabbers);
9894         ESTAT_ADD(rx_undersize_packets);
9895         ESTAT_ADD(rx_in_length_errors);
9896         ESTAT_ADD(rx_out_length_errors);
9897         ESTAT_ADD(rx_64_or_less_octet_packets);
9898         ESTAT_ADD(rx_65_to_127_octet_packets);
9899         ESTAT_ADD(rx_128_to_255_octet_packets);
9900         ESTAT_ADD(rx_256_to_511_octet_packets);
9901         ESTAT_ADD(rx_512_to_1023_octet_packets);
9902         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9903         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9904         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9905         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9906         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9907
9908         ESTAT_ADD(tx_octets);
9909         ESTAT_ADD(tx_collisions);
9910         ESTAT_ADD(tx_xon_sent);
9911         ESTAT_ADD(tx_xoff_sent);
9912         ESTAT_ADD(tx_flow_control);
9913         ESTAT_ADD(tx_mac_errors);
9914         ESTAT_ADD(tx_single_collisions);
9915         ESTAT_ADD(tx_mult_collisions);
9916         ESTAT_ADD(tx_deferred);
9917         ESTAT_ADD(tx_excessive_collisions);
9918         ESTAT_ADD(tx_late_collisions);
9919         ESTAT_ADD(tx_collide_2times);
9920         ESTAT_ADD(tx_collide_3times);
9921         ESTAT_ADD(tx_collide_4times);
9922         ESTAT_ADD(tx_collide_5times);
9923         ESTAT_ADD(tx_collide_6times);
9924         ESTAT_ADD(tx_collide_7times);
9925         ESTAT_ADD(tx_collide_8times);
9926         ESTAT_ADD(tx_collide_9times);
9927         ESTAT_ADD(tx_collide_10times);
9928         ESTAT_ADD(tx_collide_11times);
9929         ESTAT_ADD(tx_collide_12times);
9930         ESTAT_ADD(tx_collide_13times);
9931         ESTAT_ADD(tx_collide_14times);
9932         ESTAT_ADD(tx_collide_15times);
9933         ESTAT_ADD(tx_ucast_packets);
9934         ESTAT_ADD(tx_mcast_packets);
9935         ESTAT_ADD(tx_bcast_packets);
9936         ESTAT_ADD(tx_carrier_sense_errors);
9937         ESTAT_ADD(tx_discards);
9938         ESTAT_ADD(tx_errors);
9939
9940         ESTAT_ADD(dma_writeq_full);
9941         ESTAT_ADD(dma_write_prioq_full);
9942         ESTAT_ADD(rxbds_empty);
9943         ESTAT_ADD(rx_discards);
9944         ESTAT_ADD(rx_errors);
9945         ESTAT_ADD(rx_threshold_hit);
9946
9947         ESTAT_ADD(dma_readq_full);
9948         ESTAT_ADD(dma_read_prioq_full);
9949         ESTAT_ADD(tx_comp_queue_full);
9950
9951         ESTAT_ADD(ring_set_send_prod_index);
9952         ESTAT_ADD(ring_status_update);
9953         ESTAT_ADD(nic_irqs);
9954         ESTAT_ADD(nic_avoided_irqs);
9955         ESTAT_ADD(nic_tx_threshold_hit);
9956
9957         ESTAT_ADD(mbuf_lwm_thresh_hit);
9958
9959         return estats;
9960 }
9961
9962 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9963                                                  struct rtnl_link_stats64 *stats)
9964 {
9965         struct tg3 *tp = netdev_priv(dev);
9966         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9967         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9968
9969         if (!hw_stats)
9970                 return old_stats;
9971
9972         stats->rx_packets = old_stats->rx_packets +
9973                 get_stat64(&hw_stats->rx_ucast_packets) +
9974                 get_stat64(&hw_stats->rx_mcast_packets) +
9975                 get_stat64(&hw_stats->rx_bcast_packets);
9976
9977         stats->tx_packets = old_stats->tx_packets +
9978                 get_stat64(&hw_stats->tx_ucast_packets) +
9979                 get_stat64(&hw_stats->tx_mcast_packets) +
9980                 get_stat64(&hw_stats->tx_bcast_packets);
9981
9982         stats->rx_bytes = old_stats->rx_bytes +
9983                 get_stat64(&hw_stats->rx_octets);
9984         stats->tx_bytes = old_stats->tx_bytes +
9985                 get_stat64(&hw_stats->tx_octets);
9986
9987         stats->rx_errors = old_stats->rx_errors +
9988                 get_stat64(&hw_stats->rx_errors);
9989         stats->tx_errors = old_stats->tx_errors +
9990                 get_stat64(&hw_stats->tx_errors) +
9991                 get_stat64(&hw_stats->tx_mac_errors) +
9992                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9993                 get_stat64(&hw_stats->tx_discards);
9994
9995         stats->multicast = old_stats->multicast +
9996                 get_stat64(&hw_stats->rx_mcast_packets);
9997         stats->collisions = old_stats->collisions +
9998                 get_stat64(&hw_stats->tx_collisions);
9999
10000         stats->rx_length_errors = old_stats->rx_length_errors +
10001                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10002                 get_stat64(&hw_stats->rx_undersize_packets);
10003
10004         stats->rx_over_errors = old_stats->rx_over_errors +
10005                 get_stat64(&hw_stats->rxbds_empty);
10006         stats->rx_frame_errors = old_stats->rx_frame_errors +
10007                 get_stat64(&hw_stats->rx_align_errors);
10008         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10009                 get_stat64(&hw_stats->tx_discards);
10010         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10011                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10012
10013         stats->rx_crc_errors = old_stats->rx_crc_errors +
10014                 calc_crc_errors(tp);
10015
10016         stats->rx_missed_errors = old_stats->rx_missed_errors +
10017                 get_stat64(&hw_stats->rx_discards);
10018
10019         stats->rx_dropped = tp->rx_dropped;
10020         stats->tx_dropped = tp->tx_dropped;
10021
10022         return stats;
10023 }
10024
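/* Bit-serial CRC-32 over buf: reflected polynomial 0xedb88320 (the
 * standard IEEE 802.3 / Ethernet CRC), seeded with all ones and returned
 * bit-inverted.  Assuming the usual reflected implementation in
 * <linux/crc32.h>, this is equivalent to ~crc32_le(~0, buf, len).
 * Used below to hash multicast addresses.
 */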
10025 static inline u32 calc_crc(unsigned char *buf, int len)
10026 {
10027         u32 reg;
10028         u32 tmp;
10029         int j, k;
10030
10031         reg = 0xffffffff;
10032
10033         for (j = 0; j < len; j++) {
10034                 reg ^= buf[j];
10035
10036                 for (k = 0; k < 8; k++) {
10037                         tmp = reg & 0x01;
10038
10039                         reg >>= 1;
10040
10041                         if (tmp)
10042                                 reg ^= 0xedb88320;
10043                 }
10044         }
10045
10046         return ~reg;
10047 }
10048
10049 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10050 {
10051         /* accept or reject all multicast frames */
10052         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10053         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10054         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10055         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10056 }
10057
10058 static void __tg3_set_rx_mode(struct net_device *dev)
10059 {
10060         struct tg3 *tp = netdev_priv(dev);
10061         u32 rx_mode;
10062
10063         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10064                                   RX_MODE_KEEP_VLAN_TAG);
10065
10066 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10067         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10068          * flag clear.
10069          */
10070         if (!tg3_flag(tp, ENABLE_ASF))
10071                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10072 #endif
10073
10074         if (dev->flags & IFF_PROMISC) {
10075                 /* Promiscuous mode. */
10076                 rx_mode |= RX_MODE_PROMISC;
10077         } else if (dev->flags & IFF_ALLMULTI) {
10078                 /* Accept all multicast. */
10079                 tg3_set_multi(tp, 1);
10080         } else if (netdev_mc_empty(dev)) {
10081                 /* Reject all multicast. */
10082                 tg3_set_multi(tp, 0);
10083         } else {
10084                 /* Accept one or more multicast(s). */
10085                 struct netdev_hw_addr *ha;
10086                 u32 mc_filter[4] = { 0, };
10087                 u32 regidx;
10088                 u32 bit;
10089                 u32 crc;
10090
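                /* Hash each address into one of 128 filter bits: the low
                 * seven bits of the (re-inverted) CRC pick the bit, with
                 * bits 6:5 selecting one of the four 32-bit hash registers
                 * and bits 4:0 the position within it.
                 */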
10091                 netdev_for_each_mc_addr(ha, dev) {
10092                         crc = calc_crc(ha->addr, ETH_ALEN);
10093                         bit = ~crc & 0x7f;
10094                         regidx = (bit & 0x60) >> 5;
10095                         bit &= 0x1f;
10096                         mc_filter[regidx] |= (1 << bit);
10097                 }
10098
10099                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10100                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10101                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10102                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10103         }
10104
10105         if (rx_mode != tp->rx_mode) {
10106                 tp->rx_mode = rx_mode;
10107                 tw32_f(MAC_RX_MODE, rx_mode);
10108                 udelay(10);
10109         }
10110 }
10111
10112 static void tg3_set_rx_mode(struct net_device *dev)
10113 {
10114         struct tg3 *tp = netdev_priv(dev);
10115
10116         if (!netif_running(dev))
10117                 return;
10118
10119         tg3_full_lock(tp, 0);
10120         __tg3_set_rx_mode(dev);
10121         tg3_full_unlock(tp);
10122 }
10123
10124 static int tg3_get_regs_len(struct net_device *dev)
10125 {
10126         return TG3_REG_BLK_SIZE;
10127 }
10128
10129 static void tg3_get_regs(struct net_device *dev,
10130                 struct ethtool_regs *regs, void *_p)
10131 {
10132         struct tg3 *tp = netdev_priv(dev);
10133
10134         regs->version = 0;
10135
10136         memset(_p, 0, TG3_REG_BLK_SIZE);
10137
10138         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10139                 return;
10140
10141         tg3_full_lock(tp, 0);
10142
10143         tg3_dump_legacy_regs(tp, (u32 *)_p);
10144
10145         tg3_full_unlock(tp);
10146 }
10147
10148 static int tg3_get_eeprom_len(struct net_device *dev)
10149 {
10150         struct tg3 *tp = netdev_priv(dev);
10151
10152         return tp->nvram_size;
10153 }
10154
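/* NVRAM is accessed in aligned 4-byte words, so a read is split into an
 * optional leading partial word, a run of whole words, and an optional
 * trailing partial word.
 */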
10155 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10156 {
10157         struct tg3 *tp = netdev_priv(dev);
10158         int ret;
10159         u8  *pd;
10160         u32 i, offset, len, b_offset, b_count;
10161         __be32 val;
10162
10163         if (tg3_flag(tp, NO_NVRAM))
10164                 return -EINVAL;
10165
10166         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10167                 return -EAGAIN;
10168
10169         offset = eeprom->offset;
10170         len = eeprom->len;
10171         eeprom->len = 0;
10172
10173         eeprom->magic = TG3_EEPROM_MAGIC;
10174
10175         if (offset & 3) {
10176                 /* adjustments to start on the required 4-byte boundary */
10177                 b_offset = offset & 3;
10178                 b_count = 4 - b_offset;
10179                 if (b_count > len) {
10180                         /* e.g. offset=1, len=2 */
10181                         b_count = len;
10182                 }
10183                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10184                 if (ret)
10185                         return ret;
10186                 memcpy(data, ((char *)&val) + b_offset, b_count);
10187                 len -= b_count;
10188                 offset += b_count;
10189                 eeprom->len += b_count;
10190         }
10191
10192         /* read bytes up to the last 4-byte boundary */
10193         pd = &data[eeprom->len];
10194         for (i = 0; i < (len - (len & 3)); i += 4) {
10195                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10196                 if (ret) {
10197                         eeprom->len += i;
10198                         return ret;
10199                 }
10200                 memcpy(pd + i, &val, 4);
10201         }
10202         eeprom->len += i;
10203
10204         if (len & 3) {
10205                 /* read the last bytes not ending on a 4-byte boundary */
10206                 pd = &data[eeprom->len];
10207                 b_count = len & 3;
10208                 b_offset = offset + len - b_count;
10209                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10210                 if (ret)
10211                         return ret;
10212                 memcpy(pd, &val, b_count);
10213                 eeprom->len += b_count;
10214         }
10215         return 0;
10216 }
10217
10218 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10219
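/* Writes must also be whole aligned words: widen an unaligned request to
 * word boundaries by reading the bordering words and merging the caller's
 * bytes into a temporary buffer (read-modify-write).
 */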
10220 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10221 {
10222         struct tg3 *tp = netdev_priv(dev);
10223         int ret;
10224         u32 offset, len, b_offset, odd_len;
10225         u8 *buf;
10226         __be32 start, end;
10227
10228         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10229                 return -EAGAIN;
10230
10231         if (tg3_flag(tp, NO_NVRAM) ||
10232             eeprom->magic != TG3_EEPROM_MAGIC)
10233                 return -EINVAL;
10234
10235         offset = eeprom->offset;
10236         len = eeprom->len;
10237
10238         if ((b_offset = (offset & 3))) {
10239                 /* adjustments to start on the required 4-byte boundary */
10240                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10241                 if (ret)
10242                         return ret;
10243                 len += b_offset;
10244                 offset &= ~3;
10245                 if (len < 4)
10246                         len = 4;
10247         }
10248
10249         odd_len = 0;
10250         if (len & 3) {
10251                 /* adjustments to end on the required 4-byte boundary */
10252                 odd_len = 1;
10253                 len = (len + 3) & ~3;
10254                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10255                 if (ret)
10256                         return ret;
10257         }
10258
10259         buf = data;
10260         if (b_offset || odd_len) {
10261                 buf = kmalloc(len, GFP_KERNEL);
10262                 if (!buf)
10263                         return -ENOMEM;
10264                 if (b_offset)
10265                         memcpy(buf, &start, 4);
10266                 if (odd_len)
10267                         memcpy(buf+len-4, &end, 4);
10268                 memcpy(buf + b_offset, data, eeprom->len);
10269         }
10270
10271         ret = tg3_nvram_write_block(tp, offset, len, buf);
10272
10273         if (buf != data)
10274                 kfree(buf);
10275
10276         return ret;
10277 }
10278
10279 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10280 {
10281         struct tg3 *tp = netdev_priv(dev);
10282
10283         if (tg3_flag(tp, USE_PHYLIB)) {
10284                 struct phy_device *phydev;
10285                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10286                         return -EAGAIN;
10287                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10288                 return phy_ethtool_gset(phydev, cmd);
10289         }
10290
10291         cmd->supported = (SUPPORTED_Autoneg);
10292
10293         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10294                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10295                                    SUPPORTED_1000baseT_Full);
10296
10297         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10298                 cmd->supported |= (SUPPORTED_100baseT_Half |
10299                                   SUPPORTED_100baseT_Full |
10300                                   SUPPORTED_10baseT_Half |
10301                                   SUPPORTED_10baseT_Full |
10302                                   SUPPORTED_TP);
10303                 cmd->port = PORT_TP;
10304         } else {
10305                 cmd->supported |= SUPPORTED_FIBRE;
10306                 cmd->port = PORT_FIBRE;
10307         }
10308
10309         cmd->advertising = tp->link_config.advertising;
10310         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10311                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10312                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10313                                 cmd->advertising |= ADVERTISED_Pause;
10314                         } else {
10315                                 cmd->advertising |= ADVERTISED_Pause |
10316                                                     ADVERTISED_Asym_Pause;
10317                         }
10318                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10319                         cmd->advertising |= ADVERTISED_Asym_Pause;
10320                 }
10321         }
10322         if (netif_running(dev)) {
10323                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10324                 cmd->duplex = tp->link_config.active_duplex;
10325         } else {
10326                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10327                 cmd->duplex = DUPLEX_INVALID;
10328         }
10329         cmd->phy_address = tp->phy_addr;
10330         cmd->transceiver = XCVR_INTERNAL;
10331         cmd->autoneg = tp->link_config.autoneg;
10332         cmd->maxtxpkt = 0;
10333         cmd->maxrxpkt = 0;
10334         return 0;
10335 }
10336
10337 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10338 {
10339         struct tg3 *tp = netdev_priv(dev);
10340         u32 speed = ethtool_cmd_speed(cmd);
10341
10342         if (tg3_flag(tp, USE_PHYLIB)) {
10343                 struct phy_device *phydev;
10344                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10345                         return -EAGAIN;
10346                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10347                 return phy_ethtool_sset(phydev, cmd);
10348         }
10349
10350         if (cmd->autoneg != AUTONEG_ENABLE &&
10351             cmd->autoneg != AUTONEG_DISABLE)
10352                 return -EINVAL;
10353
10354         if (cmd->autoneg == AUTONEG_DISABLE &&
10355             cmd->duplex != DUPLEX_FULL &&
10356             cmd->duplex != DUPLEX_HALF)
10357                 return -EINVAL;
10358
10359         if (cmd->autoneg == AUTONEG_ENABLE) {
10360                 u32 mask = ADVERTISED_Autoneg |
10361                            ADVERTISED_Pause |
10362                            ADVERTISED_Asym_Pause;
10363
10364                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10365                         mask |= ADVERTISED_1000baseT_Half |
10366                                 ADVERTISED_1000baseT_Full;
10367
10368                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10369                         mask |= ADVERTISED_100baseT_Half |
10370                                 ADVERTISED_100baseT_Full |
10371                                 ADVERTISED_10baseT_Half |
10372                                 ADVERTISED_10baseT_Full |
10373                                 ADVERTISED_TP;
10374                 else
10375                         mask |= ADVERTISED_FIBRE;
10376
10377                 if (cmd->advertising & ~mask)
10378                         return -EINVAL;
10379
10380                 mask &= (ADVERTISED_1000baseT_Half |
10381                          ADVERTISED_1000baseT_Full |
10382                          ADVERTISED_100baseT_Half |
10383                          ADVERTISED_100baseT_Full |
10384                          ADVERTISED_10baseT_Half |
10385                          ADVERTISED_10baseT_Full);
10386
10387                 cmd->advertising &= mask;
10388         } else {
10389                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10390                         if (speed != SPEED_1000)
10391                                 return -EINVAL;
10392
10393                         if (cmd->duplex != DUPLEX_FULL)
10394                                 return -EINVAL;
10395                 } else {
10396                         if (speed != SPEED_100 &&
10397                             speed != SPEED_10)
10398                                 return -EINVAL;
10399                 }
10400         }
10401
10402         tg3_full_lock(tp, 0);
10403
10404         tp->link_config.autoneg = cmd->autoneg;
10405         if (cmd->autoneg == AUTONEG_ENABLE) {
10406                 tp->link_config.advertising = (cmd->advertising |
10407                                               ADVERTISED_Autoneg);
10408                 tp->link_config.speed = SPEED_INVALID;
10409                 tp->link_config.duplex = DUPLEX_INVALID;
10410         } else {
10411                 tp->link_config.advertising = 0;
10412                 tp->link_config.speed = speed;
10413                 tp->link_config.duplex = cmd->duplex;
10414         }
10415
10416         tp->link_config.orig_speed = tp->link_config.speed;
10417         tp->link_config.orig_duplex = tp->link_config.duplex;
10418         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10419
10420         if (netif_running(dev))
10421                 tg3_setup_phy(tp, 1);
10422
10423         tg3_full_unlock(tp);
10424
10425         return 0;
10426 }
10427
10428 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10429 {
10430         struct tg3 *tp = netdev_priv(dev);
10431
10432         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10433         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10434         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10435         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10436 }
10437
10438 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10439 {
10440         struct tg3 *tp = netdev_priv(dev);
10441
10442         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10443                 wol->supported = WAKE_MAGIC;
10444         else
10445                 wol->supported = 0;
10446         wol->wolopts = 0;
10447         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10448                 wol->wolopts = WAKE_MAGIC;
10449         memset(&wol->sopass, 0, sizeof(wol->sopass));
10450 }
10451
10452 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10453 {
10454         struct tg3 *tp = netdev_priv(dev);
10455         struct device *dp = &tp->pdev->dev;
10456
10457         if (wol->wolopts & ~WAKE_MAGIC)
10458                 return -EINVAL;
10459         if ((wol->wolopts & WAKE_MAGIC) &&
10460             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10461                 return -EINVAL;
10462
10463         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10464
10465         spin_lock_bh(&tp->lock);
10466         if (device_may_wakeup(dp))
10467                 tg3_flag_set(tp, WOL_ENABLE);
10468         else
10469                 tg3_flag_clear(tp, WOL_ENABLE);
10470         spin_unlock_bh(&tp->lock);
10471
10472         return 0;
10473 }
10474
10475 static u32 tg3_get_msglevel(struct net_device *dev)
10476 {
10477         struct tg3 *tp = netdev_priv(dev);
10478         return tp->msg_enable;
10479 }
10480
10481 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10482 {
10483         struct tg3 *tp = netdev_priv(dev);
10484         tp->msg_enable = value;
10485 }
10486
10487 static int tg3_nway_reset(struct net_device *dev)
10488 {
10489         struct tg3 *tp = netdev_priv(dev);
10490         int r;
10491
10492         if (!netif_running(dev))
10493                 return -EAGAIN;
10494
10495         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10496                 return -EINVAL;
10497
10498         if (tg3_flag(tp, USE_PHYLIB)) {
10499                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10500                         return -EAGAIN;
10501                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10502         } else {
10503                 u32 bmcr;
10504
10505                 spin_lock_bh(&tp->lock);
10506                 r = -EINVAL;
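                /* The result of the first read is discarded; presumably it
                 * flushes a stale value on some PHYs before the read that
                 * is actually checked (assumption; the rationale is not
                 * recorded here).
                 */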
10507                 tg3_readphy(tp, MII_BMCR, &bmcr);
10508                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10509                     ((bmcr & BMCR_ANENABLE) ||
10510                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10511                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10512                                                    BMCR_ANENABLE);
10513                         r = 0;
10514                 }
10515                 spin_unlock_bh(&tp->lock);
10516         }
10517
10518         return r;
10519 }
10520
10521 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10522 {
10523         struct tg3 *tp = netdev_priv(dev);
10524
10525         ering->rx_max_pending = tp->rx_std_ring_mask;
10526         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10527                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10528         else
10529                 ering->rx_jumbo_max_pending = 0;
10530
10531         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10532
10533         ering->rx_pending = tp->rx_pending;
10534         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10535                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10536         else
10537                 ering->rx_jumbo_pending = 0;
10538
10539         ering->tx_pending = tp->napi[0].tx_pending;
10540 }
10541
10542 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10543 {
10544         struct tg3 *tp = netdev_priv(dev);
10545         int i, irq_sync = 0, err = 0;
10546
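        /* Reject sizes beyond the hardware limits, and TX rings too small
         * to hold a maximally fragmented skb; TSO_BUG chips need extra
         * headroom (3x), presumably for the workaround's re-segmentation.
         */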
10547         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10548             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10549             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10550             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10551             (tg3_flag(tp, TSO_BUG) &&
10552              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10553                 return -EINVAL;
10554
10555         if (netif_running(dev)) {
10556                 tg3_phy_stop(tp);
10557                 tg3_netif_stop(tp);
10558                 irq_sync = 1;
10559         }
10560
10561         tg3_full_lock(tp, irq_sync);
10562
10563         tp->rx_pending = ering->rx_pending;
10564
10565         if (tg3_flag(tp, MAX_RXPEND_64) &&
10566             tp->rx_pending > 63)
10567                 tp->rx_pending = 63;
10568         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10569
10570         for (i = 0; i < tp->irq_max; i++)
10571                 tp->napi[i].tx_pending = ering->tx_pending;
10572
10573         if (netif_running(dev)) {
10574                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10575                 err = tg3_restart_hw(tp, 1);
10576                 if (!err)
10577                         tg3_netif_start(tp);
10578         }
10579
10580         tg3_full_unlock(tp);
10581
10582         if (irq_sync && !err)
10583                 tg3_phy_start(tp);
10584
10585         return err;
10586 }
10587
10588 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10589 {
10590         struct tg3 *tp = netdev_priv(dev);
10591
10592         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10593
10594         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10595                 epause->rx_pause = 1;
10596         else
10597                 epause->rx_pause = 0;
10598
10599         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10600                 epause->tx_pause = 1;
10601         else
10602                 epause->tx_pause = 0;
10603 }
10604
10605 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10606 {
10607         struct tg3 *tp = netdev_priv(dev);
10608         int err = 0;
10609
10610         if (tg3_flag(tp, USE_PHYLIB)) {
10611                 u32 newadv;
10612                 struct phy_device *phydev;
10613
10614                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10615
10616                 if (!(phydev->supported & SUPPORTED_Pause) ||
10617                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10618                      (epause->rx_pause != epause->tx_pause)))
10619                         return -EINVAL;
10620
10621                 tp->link_config.flowctrl = 0;
10622                 if (epause->rx_pause) {
10623                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10624
10625                         if (epause->tx_pause) {
10626                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10627                                 newadv = ADVERTISED_Pause;
10628                         } else
10629                                 newadv = ADVERTISED_Pause |
10630                                          ADVERTISED_Asym_Pause;
10631                 } else if (epause->tx_pause) {
10632                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10633                         newadv = ADVERTISED_Asym_Pause;
10634                 } else
10635                         newadv = 0;
10636
10637                 if (epause->autoneg)
10638                         tg3_flag_set(tp, PAUSE_AUTONEG);
10639                 else
10640                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10641
10642                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10643                         u32 oldadv = phydev->advertising &
10644                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10645                         if (oldadv != newadv) {
10646                                 phydev->advertising &=
10647                                         ~(ADVERTISED_Pause |
10648                                           ADVERTISED_Asym_Pause);
10649                                 phydev->advertising |= newadv;
10650                                 if (phydev->autoneg) {
10651                                         /*
10652                                          * Always renegotiate the link to
10653                                          * inform our link partner of our
10654                                          * flow control settings, even if the
10655                                          * flow control is forced.  Let
10656                                          * tg3_adjust_link() do the final
10657                                          * flow control setup.
10658                                          */
10659                                         return phy_start_aneg(phydev);
10660                                 }
10661                         }
10662
10663                         if (!epause->autoneg)
10664                                 tg3_setup_flow_control(tp, 0, 0);
10665                 } else {
10666                         tp->link_config.orig_advertising &=
10667                                         ~(ADVERTISED_Pause |
10668                                           ADVERTISED_Asym_Pause);
10669                         tp->link_config.orig_advertising |= newadv;
10670                 }
10671         } else {
10672                 int irq_sync = 0;
10673
10674                 if (netif_running(dev)) {
10675                         tg3_netif_stop(tp);
10676                         irq_sync = 1;
10677                 }
10678
10679                 tg3_full_lock(tp, irq_sync);
10680
10681                 if (epause->autoneg)
10682                         tg3_flag_set(tp, PAUSE_AUTONEG);
10683                 else
10684                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10685                 if (epause->rx_pause)
10686                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10687                 else
10688                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10689                 if (epause->tx_pause)
10690                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10691                 else
10692                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10693
10694                 if (netif_running(dev)) {
10695                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10696                         err = tg3_restart_hw(tp, 1);
10697                         if (!err)
10698                                 tg3_netif_start(tp);
10699                 }
10700
10701                 tg3_full_unlock(tp);
10702         }
10703
10704         return err;
10705 }
10706
10707 static int tg3_get_sset_count(struct net_device *dev, int sset)
10708 {
10709         switch (sset) {
10710         case ETH_SS_TEST:
10711                 return TG3_NUM_TEST;
10712         case ETH_SS_STATS:
10713                 return TG3_NUM_STATS;
10714         default:
10715                 return -EOPNOTSUPP;
10716         }
10717 }
10718
10719 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10720 {
10721         switch (stringset) {
10722         case ETH_SS_STATS:
10723                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10724                 break;
10725         case ETH_SS_TEST:
10726                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10727                 break;
10728         default:
10729                 WARN_ON(1);     /* unknown stringset; ideally a WARN() with a message */
10730                 break;
10731         }
10732 }
10733
10734 static int tg3_set_phys_id(struct net_device *dev,
10735                             enum ethtool_phys_id_state state)
10736 {
10737         struct tg3 *tp = netdev_priv(dev);
10738
10739         if (!netif_running(tp->dev))
10740                 return -EAGAIN;
10741
10742         switch (state) {
10743         case ETHTOOL_ID_ACTIVE:
10744                 return 1;       /* cycle on/off once per second */
10745
10746         case ETHTOOL_ID_ON:
10747                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10748                      LED_CTRL_1000MBPS_ON |
10749                      LED_CTRL_100MBPS_ON |
10750                      LED_CTRL_10MBPS_ON |
10751                      LED_CTRL_TRAFFIC_OVERRIDE |
10752                      LED_CTRL_TRAFFIC_BLINK |
10753                      LED_CTRL_TRAFFIC_LED);
10754                 break;
10755
10756         case ETHTOOL_ID_OFF:
10757                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10758                      LED_CTRL_TRAFFIC_OVERRIDE);
10759                 break;
10760
10761         case ETHTOOL_ID_INACTIVE:
10762                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10763                 break;
10764         }
10765
10766         return 0;
10767 }
10768
10769 static void tg3_get_ethtool_stats(struct net_device *dev,
10770                                    struct ethtool_stats *estats, u64 *tmp_stats)
10771 {
10772         struct tg3 *tp = netdev_priv(dev);
10773         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10774 }
10775
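/* Return a kmalloc'ed copy of the VPD block.  Prefer an extended VPD
 * image located via the NVRAM directory; otherwise fall back to the
 * default VPD region in NVRAM, or to the PCI VPD capability when there
 * is no usable NVRAM image.
 */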
10776 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10777 {
10778         int i;
10779         __be32 *buf;
10780         u32 offset = 0, len = 0;
10781         u32 magic, val;
10782
10783         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10784                 return NULL;
10785
10786         if (magic == TG3_EEPROM_MAGIC) {
10787                 for (offset = TG3_NVM_DIR_START;
10788                      offset < TG3_NVM_DIR_END;
10789                      offset += TG3_NVM_DIRENT_SIZE) {
10790                         if (tg3_nvram_read(tp, offset, &val))
10791                                 return NULL;
10792
10793                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10794                             TG3_NVM_DIRTYPE_EXTVPD)
10795                                 break;
10796                 }
10797
10798                 if (offset != TG3_NVM_DIR_END) {
10799                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10800                         if (tg3_nvram_read(tp, offset + 4, &offset))
10801                                 return NULL;
10802
10803                         offset = tg3_nvram_logical_addr(tp, offset);
10804                 }
10805         }
10806
10807         if (!offset || !len) {
10808                 offset = TG3_NVM_VPD_OFF;
10809                 len = TG3_NVM_VPD_LEN;
10810         }
10811
10812         buf = kmalloc(len, GFP_KERNEL);
10813         if (buf == NULL)
10814                 return NULL;
10815
10816         if (magic == TG3_EEPROM_MAGIC) {
10817                 for (i = 0; i < len; i += 4) {
10818                         /* The data is in little-endian format in NVRAM.
10819                          * Use the big-endian read routines to preserve
10820                          * the byte order as it exists in NVRAM.
10821                          */
10822                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10823                                 goto error;
10824                 }
10825         } else {
10826                 u8 *ptr;
10827                 ssize_t cnt;
10828                 unsigned int pos = 0;
10829
10830                 ptr = (u8 *)&buf[0];
10831                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10832                         cnt = pci_read_vpd(tp->pdev, pos,
10833                                            len - pos, ptr);
10834                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10835                                 cnt = 0;
10836                         else if (cnt < 0)
10837                                 goto error;
10838                 }
10839                 if (pos != len)
10840                         goto error;
10841         }
10842
10843         *vpdlen = len;
10844
10845         return buf;
10846
10847 error:
10848         kfree(buf);
10849         return NULL;
10850 }
10851
10852 #define NVRAM_TEST_SIZE 0x100
10853 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10854 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10855 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10856 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10857 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10858 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10859 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10860 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10861
10862 static int tg3_test_nvram(struct tg3 *tp)
10863 {
10864         u32 csum, magic, len;
10865         __be32 *buf;
10866         int i, j, k, err = 0, size;
10867
10868         if (tg3_flag(tp, NO_NVRAM))
10869                 return 0;
10870
10871         if (tg3_nvram_read(tp, 0, &magic) != 0)
10872                 return -EIO;
10873
10874         if (magic == TG3_EEPROM_MAGIC)
10875                 size = NVRAM_TEST_SIZE;
10876         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10877                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10878                     TG3_EEPROM_SB_FORMAT_1) {
10879                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10880                         case TG3_EEPROM_SB_REVISION_0:
10881                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10882                                 break;
10883                         case TG3_EEPROM_SB_REVISION_2:
10884                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10885                                 break;
10886                         case TG3_EEPROM_SB_REVISION_3:
10887                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10888                                 break;
10889                         case TG3_EEPROM_SB_REVISION_4:
10890                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10891                                 break;
10892                         case TG3_EEPROM_SB_REVISION_5:
10893                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10894                                 break;
10895                         case TG3_EEPROM_SB_REVISION_6:
10896                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10897                                 break;
10898                         default:
10899                                 return -EIO;
10900                         }
10901                 } else
10902                         return 0;
10903         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10904                 size = NVRAM_SELFBOOT_HW_SIZE;
10905         else
10906                 return -EIO;
10907
10908         buf = kmalloc(size, GFP_KERNEL);
10909         if (buf == NULL)
10910                 return -ENOMEM;
10911
10912         err = -EIO;
10913         for (i = 0, j = 0; i < size; i += 4, j++) {
10914                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10915                 if (err)
10916                         break;
10917         }
10918         if (i < size)
10919                 goto out;
10920
10921         /* Selfboot format */
10922         magic = be32_to_cpu(buf[0]);
10923         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10924             TG3_EEPROM_MAGIC_FW) {
10925                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10926
10927                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10928                     TG3_EEPROM_SB_REVISION_2) {
10929                         /* For rev 2, the csum doesn't include the MBA. */
10930                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10931                                 csum8 += buf8[i];
10932                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10933                                 csum8 += buf8[i];
10934                 } else {
10935                         for (i = 0; i < size; i++)
10936                                 csum8 += buf8[i];
10937                 }
10938
10939                 if (csum8 == 0) {
10940                         err = 0;
10941                         goto out;
10942                 }
10943
10944                 err = -EIO;
10945                 goto out;
10946         }
10947
10948         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10949             TG3_EEPROM_MAGIC_HW) {
10950                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10951                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10952                 u8 *buf8 = (u8 *) buf;
10953
10954                 /* Separate the parity bits and the data bytes.  */
10955                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10956                         if ((i == 0) || (i == 8)) {
10957                                 int l;
10958                                 u8 msk;
10959
10960                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10961                                         parity[k++] = buf8[i] & msk;
10962                                 i++;
10963                         } else if (i == 16) {
10964                                 int l;
10965                                 u8 msk;
10966
10967                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10968                                         parity[k++] = buf8[i] & msk;
10969                                 i++;
10970
10971                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10972                                         parity[k++] = buf8[i] & msk;
10973                                 i++;
10974                         }
10975                         data[j++] = buf8[i];
10976                 }
10977
10978                 err = -EIO;
10979                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10980                         u8 hw8 = hweight8(data[i]);
10981
10982                         if ((hw8 & 0x1) && parity[i])
10983                                 goto out;
10984                         else if (!(hw8 & 0x1) && !parity[i])
10985                                 goto out;
10986                 }
10987                 err = 0;
10988                 goto out;
10989         }
10990
10991         err = -EIO;
10992
10993         /* Bootstrap checksum at offset 0x10 */
10994         csum = calc_crc((unsigned char *) buf, 0x10);
10995         if (csum != le32_to_cpu(buf[0x10/4]))
10996                 goto out;
10997
10998         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10999         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11000         if (csum != le32_to_cpu(buf[0xfc/4]))
11001                 goto out;
11002
11003         kfree(buf);
11004
11005         buf = tg3_vpd_readblock(tp, &len);
11006         if (!buf)
11007                 return -ENOMEM;
11008
11009         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11010         if (i > 0) {
11011                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11012                 if (j < 0)
11013                         goto out;
11014
11015                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11016                         goto out;
11017
11018                 i += PCI_VPD_LRDT_TAG_SIZE;
11019                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11020                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11021                 if (j > 0) {
11022                         u8 csum8 = 0;
11023
11024                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11025
11026                         for (i = 0; i <= j; i++)
11027                                 csum8 += ((u8 *)buf)[i];
11028
11029                         if (csum8)
11030                                 goto out;
11031                 }
11032         }
11033
11034         err = 0;
11035
11036 out:
11037         kfree(buf);
11038         return err;
11039 }
11040
11041 #define TG3_SERDES_TIMEOUT_SEC  2
11042 #define TG3_COPPER_TIMEOUT_SEC  6
11043
11044 static int tg3_test_link(struct tg3 *tp)
11045 {
11046         int i, max;
11047
11048         if (!netif_running(tp->dev))
11049                 return -ENODEV;
11050
11051         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11052                 max = TG3_SERDES_TIMEOUT_SEC;
11053         else
11054                 max = TG3_COPPER_TIMEOUT_SEC;
11055
11056         for (i = 0; i < max; i++) {
11057                 if (netif_carrier_ok(tp->dev))
11058                         return 0;
11059
11060                 if (msleep_interruptible(1000))
11061                         break;
11062         }
11063
11064         return -EIO;
11065 }
11066
11067 /* Only test the commonly used registers */
11068 static int tg3_test_registers(struct tg3 *tp)
11069 {
11070         int i, is_5705, is_5750;
11071         u32 offset, read_mask, write_mask, val, save_val, read_val;
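        /* Table semantics (inferred): read_mask selects bits whose fixed
         * values must read back as listed, while write_mask selects bits
         * the register test may write and expect to retain.
         */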
11072         static struct {
11073                 u16 offset;
11074                 u16 flags;
11075 #define TG3_FL_5705     0x1
11076 #define TG3_FL_NOT_5705 0x2
11077 #define TG3_FL_NOT_5788 0x4
11078 #define TG3_FL_NOT_5750 0x8
11079                 u32 read_mask;
11080                 u32 write_mask;
11081         } reg_tbl[] = {
11082                 /* MAC Control Registers */
11083                 { MAC_MODE, TG3_FL_NOT_5705,
11084                         0x00000000, 0x00ef6f8c },
11085                 { MAC_MODE, TG3_FL_5705,
11086                         0x00000000, 0x01ef6b8c },
11087                 { MAC_STATUS, TG3_FL_NOT_5705,
11088                         0x03800107, 0x00000000 },
11089                 { MAC_STATUS, TG3_FL_5705,
11090                         0x03800100, 0x00000000 },
11091                 { MAC_ADDR_0_HIGH, 0x0000,
11092                         0x00000000, 0x0000ffff },
11093                 { MAC_ADDR_0_LOW, 0x0000,
11094                         0x00000000, 0xffffffff },
11095                 { MAC_RX_MTU_SIZE, 0x0000,
11096                         0x00000000, 0x0000ffff },
11097                 { MAC_TX_MODE, 0x0000,
11098                         0x00000000, 0x00000070 },
11099                 { MAC_TX_LENGTHS, 0x0000,
11100                         0x00000000, 0x00003fff },
11101                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11102                         0x00000000, 0x000007fc },
11103                 { MAC_RX_MODE, TG3_FL_5705,
11104                         0x00000000, 0x000007dc },
11105                 { MAC_HASH_REG_0, 0x0000,
11106                         0x00000000, 0xffffffff },
11107                 { MAC_HASH_REG_1, 0x0000,
11108                         0x00000000, 0xffffffff },
11109                 { MAC_HASH_REG_2, 0x0000,
11110                         0x00000000, 0xffffffff },
11111                 { MAC_HASH_REG_3, 0x0000,
11112                         0x00000000, 0xffffffff },
11113
11114                 /* Receive Data and Receive BD Initiator Control Registers. */
11115                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11116                         0x00000000, 0xffffffff },
11117                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11118                         0x00000000, 0xffffffff },
11119                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11120                         0x00000000, 0x00000003 },
11121                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11122                         0x00000000, 0xffffffff },
11123                 { RCVDBDI_STD_BD+0, 0x0000,
11124                         0x00000000, 0xffffffff },
11125                 { RCVDBDI_STD_BD+4, 0x0000,
11126                         0x00000000, 0xffffffff },
11127                 { RCVDBDI_STD_BD+8, 0x0000,
11128                         0x00000000, 0xffff0002 },
11129                 { RCVDBDI_STD_BD+0xc, 0x0000,
11130                         0x00000000, 0xffffffff },
11131
11132                 /* Receive BD Initiator Control Registers. */
11133                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11134                         0x00000000, 0xffffffff },
11135                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11136                         0x00000000, 0x000003ff },
11137                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11138                         0x00000000, 0xffffffff },
11139
11140                 /* Host Coalescing Control Registers. */
11141                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11142                         0x00000000, 0x00000004 },
11143                 { HOSTCC_MODE, TG3_FL_5705,
11144                         0x00000000, 0x000000f6 },
11145                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11146                         0x00000000, 0xffffffff },
11147                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11148                         0x00000000, 0x000003ff },
11149                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11150                         0x00000000, 0xffffffff },
11151                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11152                         0x00000000, 0x000003ff },
11153                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11154                         0x00000000, 0xffffffff },
11155                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11156                         0x00000000, 0x000000ff },
11157                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11158                         0x00000000, 0xffffffff },
11159                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11160                         0x00000000, 0x000000ff },
11161                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11162                         0x00000000, 0xffffffff },
11163                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11164                         0x00000000, 0xffffffff },
11165                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11166                         0x00000000, 0xffffffff },
11167                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11168                         0x00000000, 0x000000ff },
11169                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11170                         0x00000000, 0xffffffff },
11171                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11172                         0x00000000, 0x000000ff },
11173                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11174                         0x00000000, 0xffffffff },
11175                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11176                         0x00000000, 0xffffffff },
11177                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11178                         0x00000000, 0xffffffff },
11179                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11180                         0x00000000, 0xffffffff },
11181                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11182                         0x00000000, 0xffffffff },
11183                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11184                         0xffffffff, 0x00000000 },
11185                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11186                         0xffffffff, 0x00000000 },
11187
11188                 /* Buffer Manager Control Registers. */
11189                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11190                         0x00000000, 0x007fff80 },
11191                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11192                         0x00000000, 0x007fffff },
11193                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11194                         0x00000000, 0x0000003f },
11195                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11196                         0x00000000, 0x000001ff },
11197                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11198                         0x00000000, 0x000001ff },
11199                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11200                         0xffffffff, 0x00000000 },
11201                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11202                         0xffffffff, 0x00000000 },
11203
11204                 /* Mailbox Registers */
11205                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11206                         0x00000000, 0x000001ff },
11207                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11208                         0x00000000, 0x000001ff },
11209                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11210                         0x00000000, 0x000007ff },
11211                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11212                         0x00000000, 0x000001ff },
11213
11214                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11215         };
11216
11217         is_5705 = is_5750 = 0;
11218         if (tg3_flag(tp, 5705_PLUS)) {
11219                 is_5705 = 1;
11220                 if (tg3_flag(tp, 5750_PLUS))
11221                         is_5750 = 1;
11222         }
11223
11224         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11225                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11226                         continue;
11227
11228                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11229                         continue;
11230
11231                 if (tg3_flag(tp, IS_5788) &&
11232                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11233                         continue;
11234
11235                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11236                         continue;
11237
11238                 offset = (u32) reg_tbl[i].offset;
11239                 read_mask = reg_tbl[i].read_mask;
11240                 write_mask = reg_tbl[i].write_mask;
11241
11242                 /* Save the original register content */
11243                 save_val = tr32(offset);
11244
11245                 /* Determine the read-only value. */
11246                 read_val = save_val & read_mask;
11247
11248                 /* Write zero to the register, then make sure the read-only bits
11249                  * are not changed and the read/write bits are all zeros.
11250                  */
11251                 tw32(offset, 0);
11252
11253                 val = tr32(offset);
11254
11255                 /* Test the read-only and read/write bits. */
11256                 if (((val & read_mask) != read_val) || (val & write_mask))
11257                         goto out;
11258
11259                 /* Write ones to all the bits defined by read_mask and
11260                  * write_mask, then make sure the read-only bits are not
11261                  * changed and the read/write bits are all ones.
11262                  */
11263                 tw32(offset, read_mask | write_mask);
11264
11265                 val = tr32(offset);
11266
11267                 /* Test the read-only bits. */
11268                 if ((val & read_mask) != read_val)
11269                         goto out;
11270
11271                 /* Test the read/write bits. */
11272                 if ((val & write_mask) != write_mask)
11273                         goto out;
11274
11275                 tw32(offset, save_val);
11276         }
11277
11278         return 0;
11279
11280 out:
11281         if (netif_msg_hw(tp))
11282                 netdev_err(tp->dev,
11283                            "Register test failed at offset %x\n", offset);
11284         tw32(offset, save_val);
11285         return -EIO;
11286 }
11287
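      /* Write each test pattern to every word of the on-chip memory
       * window [offset, offset + len) and read it back; any mismatch
       * fails the test.
       */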
11288 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11289 {
11290         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11291         int i;
11292         u32 j;
11293
11294         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11295                 for (j = 0; j < len; j += 4) {
11296                         u32 val;
11297
11298                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11299                         tg3_read_mem(tp, offset + j, &val);
11300                         if (val != test_pattern[i])
11301                                 return -EIO;
11302                 }
11303         }
11304         return 0;
11305 }
11306
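      /* Each table below lists the internal SRAM ranges that are safe
       * to scribble on for one ASIC family; an offset of 0xffffffff
       * terminates a table.
       */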
11307 static int tg3_test_memory(struct tg3 *tp)
11308 {
11309         static struct mem_entry {
11310                 u32 offset;
11311                 u32 len;
11312         } mem_tbl_570x[] = {
11313                 { 0x00000000, 0x00b50},
11314                 { 0x00002000, 0x1c000},
11315                 { 0xffffffff, 0x00000}
11316         }, mem_tbl_5705[] = {
11317                 { 0x00000100, 0x0000c},
11318                 { 0x00000200, 0x00008},
11319                 { 0x00004000, 0x00800},
11320                 { 0x00006000, 0x01000},
11321                 { 0x00008000, 0x02000},
11322                 { 0x00010000, 0x0e000},
11323                 { 0xffffffff, 0x00000}
11324         }, mem_tbl_5755[] = {
11325                 { 0x00000200, 0x00008},
11326                 { 0x00004000, 0x00800},
11327                 { 0x00006000, 0x00800},
11328                 { 0x00008000, 0x02000},
11329                 { 0x00010000, 0x0c000},
11330                 { 0xffffffff, 0x00000}
11331         }, mem_tbl_5906[] = {
11332                 { 0x00000200, 0x00008},
11333                 { 0x00004000, 0x00400},
11334                 { 0x00006000, 0x00400},
11335                 { 0x00008000, 0x01000},
11336                 { 0x00010000, 0x01000},
11337                 { 0xffffffff, 0x00000}
11338         }, mem_tbl_5717[] = {
11339                 { 0x00000200, 0x00008},
11340                 { 0x00010000, 0x0a000},
11341                 { 0x00020000, 0x13c00},
11342                 { 0xffffffff, 0x00000}
11343         }, mem_tbl_57765[] = {
11344                 { 0x00000200, 0x00008},
11345                 { 0x00004000, 0x00800},
11346                 { 0x00006000, 0x09800},
11347                 { 0x00010000, 0x0a000},
11348                 { 0xffffffff, 0x00000}
11349         };
11350         struct mem_entry *mem_tbl;
11351         int err = 0;
11352         int i;
11353
11354         if (tg3_flag(tp, 5717_PLUS))
11355                 mem_tbl = mem_tbl_5717;
11356         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11357                 mem_tbl = mem_tbl_57765;
11358         else if (tg3_flag(tp, 5755_PLUS))
11359                 mem_tbl = mem_tbl_5755;
11360         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11361                 mem_tbl = mem_tbl_5906;
11362         else if (tg3_flag(tp, 5705_PLUS))
11363                 mem_tbl = mem_tbl_5705;
11364         else
11365                 mem_tbl = mem_tbl_570x;
11366
11367         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11368                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11369                 if (err)
11370                         break;
11371         }
11372
11373         return err;
11374 }
11375
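      /* Parameters of the canned TSO frame used by the loopback test
       * below.  The small MSS forces the hardware to segment the test
       * packet, and the header lengths match the tg3_tso_header
       * template.
       */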
11376 #define TG3_TSO_MSS             500
11377
11378 #define TG3_TSO_IP_HDR_LEN      20
11379 #define TG3_TSO_TCP_HDR_LEN     20
11380 #define TG3_TSO_TCP_OPT_LEN     12
11381
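      /* TSO test frame template, copied in just after the two MAC
       * addresses: EtherType 0x0800 (IPv4), a 20-byte IPv4 header
       * (10.0.0.1 -> 10.0.0.2, proto TCP), and a 32-byte TCP header
       * whose 12 option bytes are two NOPs plus a timestamp option.
       * The IP total length field is patched in at run time.
       */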
11382 static const u8 tg3_tso_header[] = {
11383 0x08, 0x00,
11384 0x45, 0x00, 0x00, 0x00,
11385 0x00, 0x00, 0x40, 0x00,
11386 0x40, 0x06, 0x00, 0x00,
11387 0x0a, 0x00, 0x00, 0x01,
11388 0x0a, 0x00, 0x00, 0x02,
11389 0x0d, 0x00, 0xe0, 0x00,
11390 0x00, 0x00, 0x01, 0x00,
11391 0x00, 0x00, 0x02, 0x00,
11392 0x80, 0x10, 0x10, 0x00,
11393 0x14, 0x09, 0x00, 0x00,
11394 0x01, 0x01, 0x08, 0x0a,
11395 0x11, 0x11, 0x11, 0x11,
11396 0x11, 0x11, 0x11, 0x11,
11397 };
11398
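      /* Push one frame (or one TSO burst) through the currently
       * configured loopback path: post the buffer on the tx ring, poll
       * the status block until the tx and rx indices advance, then
       * verify the rx descriptors and payload byte-for-byte.
       */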
11399 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11400 {
11401         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11402         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11403         u32 budget;
11404         struct sk_buff *skb, *rx_skb;
11405         u8 *tx_data;
11406         dma_addr_t map;
11407         int num_pkts, tx_len, rx_len, i, err;
11408         struct tg3_rx_buffer_desc *desc;
11409         struct tg3_napi *tnapi, *rnapi;
11410         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11411
11412         tnapi = &tp->napi[0];
11413         rnapi = &tp->napi[0];
11414         if (tp->irq_cnt > 1) {
11415                 if (tg3_flag(tp, ENABLE_RSS))
11416                         rnapi = &tp->napi[1];
11417                 if (tg3_flag(tp, ENABLE_TSS))
11418                         tnapi = &tp->napi[1];
11419         }
11420         coal_now = tnapi->coal_now | rnapi->coal_now;
11421
11422         err = -EIO;
11423
11424         tx_len = pktsz;
11425         skb = netdev_alloc_skb(tp->dev, tx_len);
11426         if (!skb)
11427                 return -ENOMEM;
11428
11429         tx_data = skb_put(skb, tx_len);
11430         memcpy(tx_data, tp->dev->dev_addr, 6);
11431         memset(tx_data + 6, 0x0, 8);
11432
11433         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11434
11435         if (tso_loopback) {
11436                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11437
11438                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11439                               TG3_TSO_TCP_OPT_LEN;
11440
11441                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11442                        sizeof(tg3_tso_header));
11443                 mss = TG3_TSO_MSS;
11444
11445                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11446                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11447
11448                 /* Set the total length field in the IP header */
11449                 iph->tot_len = htons((u16)(mss + hdr_len));
11450
11451                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11452                               TXD_FLAG_CPU_POST_DMA);
11453
11454                 if (tg3_flag(tp, HW_TSO_1) ||
11455                     tg3_flag(tp, HW_TSO_2) ||
11456                     tg3_flag(tp, HW_TSO_3)) {
11457                         struct tcphdr *th;
11458                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11459                         th = (struct tcphdr *)&tx_data[val];
11460                         th->check = 0;
11461                 } else
11462                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11463
11464                 if (tg3_flag(tp, HW_TSO_3)) {
11465                         mss |= (hdr_len & 0xc) << 12;
11466                         if (hdr_len & 0x10)
11467                                 base_flags |= 0x00000010;
11468                         base_flags |= (hdr_len & 0x3e0) << 5;
11469                 } else if (tg3_flag(tp, HW_TSO_2))
11470                         mss |= hdr_len << 9;
11471                 else if (tg3_flag(tp, HW_TSO_1) ||
11472                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11473                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11474                 } else {
11475                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11476                 }
11477
11478                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11479         } else {
11480                 num_pkts = 1;
11481                 data_off = ETH_HLEN;
11482         }
11483
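              /* Fill the payload with a counting pattern so the
               * verification loop below can check every byte.
               */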
11484         for (i = data_off; i < tx_len; i++)
11485                 tx_data[i] = (u8) (i & 0xff);
11486
11487         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11488         if (pci_dma_mapping_error(tp->pdev, map)) {
11489                 dev_kfree_skb(skb);
11490                 return -EIO;
11491         }
11492
11493         val = tnapi->tx_prod;
11494         tnapi->tx_buffers[val].skb = skb;
11495         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11496
11497         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11498                rnapi->coal_now);
11499
11500         udelay(10);
11501
11502         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11503
11504         budget = tg3_tx_avail(tnapi);
11505         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11506                             base_flags | TXD_FLAG_END, mss, 0)) {
11507                 tnapi->tx_buffers[val].skb = NULL;
11508                 dev_kfree_skb(skb);
11509                 return -EIO;
11510         }
11511
11512         tnapi->tx_prod++;
11513
11514         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11515         tr32_mailbox(tnapi->prodmbox);
11516
11517         udelay(10);
11518
11519         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11520         for (i = 0; i < 35; i++) {
11521                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11522                        coal_now);
11523
11524                 udelay(10);
11525
11526                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11527                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11528                 if ((tx_idx == tnapi->tx_prod) &&
11529                     (rx_idx == (rx_start_idx + num_pkts)))
11530                         break;
11531         }
11532
11533         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11534         dev_kfree_skb(skb);
11535
11536         if (tx_idx != tnapi->tx_prod)
11537                 goto out;
11538
11539         if (rx_idx != rx_start_idx + num_pkts)
11540                 goto out;
11541
11542         val = data_off;
11543         while (rx_idx != rx_start_idx) {
11544                 desc = &rnapi->rx_rcb[rx_start_idx++];
11545                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11546                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11547
11548                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11549                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11550                         goto out;
11551
11552                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11553                          - ETH_FCS_LEN;
11554
11555                 if (!tso_loopback) {
11556                         if (rx_len != tx_len)
11557                                 goto out;
11558
11559                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11560                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11561                                         goto out;
11562                         } else {
11563                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11564                                         goto out;
11565                         }
11566                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11567                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11568                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11569                         goto out;
11570                 }
11571
11572                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11573                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11574                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11575                                              mapping);
11576                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11577                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11578                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11579                                              mapping);
11580                 } else
11581                         goto out;
11582
11583                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11584                                             PCI_DMA_FROMDEVICE);
11585
11586                 for (i = data_off; i < rx_len; i++, val++) {
11587                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11588                                 goto out;
11589                 }
11590         }
11591
11592         err = 0;
11593
11594         /* tg3_free_rings will unmap and free the rx_skb */
11595 out:
11596         return err;
11597 }
11598
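      /* Per-mode failure bits packed into each u64 of the ethtool
       * self-test results filled in by tg3_test_loopback().
       */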
11599 #define TG3_STD_LOOPBACK_FAILED         1
11600 #define TG3_JMB_LOOPBACK_FAILED         2
11601 #define TG3_TSO_LOOPBACK_FAILED         4
11602 #define TG3_LOOPBACK_FAILED \
11603         (TG3_STD_LOOPBACK_FAILED | \
11604          TG3_JMB_LOOPBACK_FAILED | \
11605          TG3_TSO_LOOPBACK_FAILED)
11606
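      /* Run up to three loopback variants: MAC-internal (data[0],
       * skipped on the 5780 and on chips with a CPMU), PHY-internal
       * (data[1]) and, on request, external PHY loopback (data[2]).
       */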
11607 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11608 {
11609         int err = -EIO;
11610         u32 eee_cap;
11611
11612         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11613         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11614
11615         if (!netif_running(tp->dev)) {
11616                 data[0] = TG3_LOOPBACK_FAILED;
11617                 data[1] = TG3_LOOPBACK_FAILED;
11618                 if (do_extlpbk)
11619                         data[2] = TG3_LOOPBACK_FAILED;
11620                 goto done;
11621         }
11622
11623         err = tg3_reset_hw(tp, 1);
11624         if (err) {
11625                 data[0] = TG3_LOOPBACK_FAILED;
11626                 data[1] = TG3_LOOPBACK_FAILED;
11627                 if (do_extlpbk)
11628                         data[2] = TG3_LOOPBACK_FAILED;
11629                 goto done;
11630         }
11631
11632         if (tg3_flag(tp, ENABLE_RSS)) {
11633                 int i;
11634
11635                 /* Reroute all rx packets to the 1st queue */
11636                 for (i = MAC_RSS_INDIR_TBL_0;
11637                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11638                         tw32(i, 0x0);
11639         }
11640
11641         /* HW erratum - MAC loopback fails in some cases on the 5780.
11642          * Normal traffic and PHY loopback are not affected by this
11643          * erratum.  Also, the MAC loopback test is deprecated for
11644          * all newer ASIC revisions.
11645          */
11646         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11647             !tg3_flag(tp, CPMU_PRESENT)) {
11648                 tg3_mac_loopback(tp, true);
11649
11650                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11651                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11652
11653                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11654                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11655                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11656
11657                 tg3_mac_loopback(tp, false);
11658         }
11659
11660         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11661             !tg3_flag(tp, USE_PHYLIB)) {
11662                 int i;
11663
11664                 tg3_phy_lpbk_set(tp, 0, false);
11665
11666                 /* Wait for link */
11667                 for (i = 0; i < 100; i++) {
11668                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11669                                 break;
11670                         mdelay(1);
11671                 }
11672
11673                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11674                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11675                 if (tg3_flag(tp, TSO_CAPABLE) &&
11676                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11677                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11678                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11679                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11680                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11681
11682                 if (do_extlpbk) {
11683                         tg3_phy_lpbk_set(tp, 0, true);
11684
11685                         /* All link indications report up, but the hardware
11686                          * isn't really ready for about 20 msec.  Double it
11687                          * to be sure.
11688                          */
11689                         mdelay(40);
11690
11691                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11692                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11693                         if (tg3_flag(tp, TSO_CAPABLE) &&
11694                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11695                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11696                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11697                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11698                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11699                 }
11700
11701                 /* Re-enable gphy autopowerdown. */
11702                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11703                         tg3_phy_toggle_apd(tp, true);
11704         }
11705
11706         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11707
11708 done:
11709         tp->phy_flags |= eee_cap;
11710
11711         return err;
11712 }
11713
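      /* ethtool self-test entry point.  Result layout: data[0] NVRAM,
       * data[1] link, data[2] registers, data[3] memory, data[4]-[6]
       * the loopback modes, data[7] interrupt.  Offline tests halt the
       * chip and restart it afterwards.  Typically reached from
       * userspace via something like "ethtool -t eth0 offline".
       */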
11714 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11715                           u64 *data)
11716 {
11717         struct tg3 *tp = netdev_priv(dev);
11718         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11719
11720         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11721             tg3_power_up(tp)) {
11722                 etest->flags |= ETH_TEST_FL_FAILED;
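                      /* memset() stores the byte 0x01 into every byte
                       * of each u64, so the results are merely nonzero;
                       * that is all ethtool needs to flag a failure.
                       */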
11723                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11724                 return;
11725         }
11726
11727         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11728
11729         if (tg3_test_nvram(tp) != 0) {
11730                 etest->flags |= ETH_TEST_FL_FAILED;
11731                 data[0] = 1;
11732         }
11733         if (!doextlpbk && tg3_test_link(tp)) {
11734                 etest->flags |= ETH_TEST_FL_FAILED;
11735                 data[1] = 1;
11736         }
11737         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11738                 int err, err2 = 0, irq_sync = 0;
11739
11740                 if (netif_running(dev)) {
11741                         tg3_phy_stop(tp);
11742                         tg3_netif_stop(tp);
11743                         irq_sync = 1;
11744                 }
11745
11746                 tg3_full_lock(tp, irq_sync);
11747
11748                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11749                 err = tg3_nvram_lock(tp);
11750                 tg3_halt_cpu(tp, RX_CPU_BASE);
11751                 if (!tg3_flag(tp, 5705_PLUS))
11752                         tg3_halt_cpu(tp, TX_CPU_BASE);
11753                 if (!err)
11754                         tg3_nvram_unlock(tp);
11755
11756                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11757                         tg3_phy_reset(tp);
11758
11759                 if (tg3_test_registers(tp) != 0) {
11760                         etest->flags |= ETH_TEST_FL_FAILED;
11761                         data[2] = 1;
11762                 }
11763
11764                 if (tg3_test_memory(tp) != 0) {
11765                         etest->flags |= ETH_TEST_FL_FAILED;
11766                         data[3] = 1;
11767                 }
11768
11769                 if (doextlpbk)
11770                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11771
11772                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11773                         etest->flags |= ETH_TEST_FL_FAILED;
11774
11775                 tg3_full_unlock(tp);
11776
11777                 if (tg3_test_interrupt(tp) != 0) {
11778                         etest->flags |= ETH_TEST_FL_FAILED;
11779                         data[7] = 1;
11780                 }
11781
11782                 tg3_full_lock(tp, 0);
11783
11784                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11785                 if (netif_running(dev)) {
11786                         tg3_flag_set(tp, INIT_COMPLETE);
11787                         err2 = tg3_restart_hw(tp, 1);
11788                         if (!err2)
11789                                 tg3_netif_start(tp);
11790                 }
11791
11792                 tg3_full_unlock(tp);
11793
11794                 if (irq_sync && !err2)
11795                         tg3_phy_start(tp);
11796         }
11797         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11798                 tg3_power_down(tp);
11799
11800 }
11801
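      /* MII ioctl handler: hand SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to
       * phylib when it manages the PHY, otherwise access the PHY
       * registers directly under tp->lock.
       */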
11802 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11803 {
11804         struct mii_ioctl_data *data = if_mii(ifr);
11805         struct tg3 *tp = netdev_priv(dev);
11806         int err;
11807
11808         if (tg3_flag(tp, USE_PHYLIB)) {
11809                 struct phy_device *phydev;
11810                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11811                         return -EAGAIN;
11812                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11813                 return phy_mii_ioctl(phydev, ifr, cmd);
11814         }
11815
11816         switch (cmd) {
11817         case SIOCGMIIPHY:
11818                 data->phy_id = tp->phy_addr;
11819
11820                 /* fall through */
11821         case SIOCGMIIREG: {
11822                 u32 mii_regval;
11823
11824                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11825                         break;                  /* We have no PHY */
11826
11827                 if (!netif_running(dev))
11828                         return -EAGAIN;
11829
11830                 spin_lock_bh(&tp->lock);
11831                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11832                 spin_unlock_bh(&tp->lock);
11833
11834                 data->val_out = mii_regval;
11835
11836                 return err;
11837         }
11838
11839         case SIOCSMIIREG:
11840                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11841                         break;                  /* We have no PHY */
11842
11843                 if (!netif_running(dev))
11844                         return -EAGAIN;
11845
11846                 spin_lock_bh(&tp->lock);
11847                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11848                 spin_unlock_bh(&tp->lock);
11849
11850                 return err;
11851
11852         default:
11853                 /* do nothing */
11854                 break;
11855         }
11856         return -EOPNOTSUPP;
11857 }
11858
11859 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11860 {
11861         struct tg3 *tp = netdev_priv(dev);
11862
11863         memcpy(ec, &tp->coal, sizeof(*ec));
11864         return 0;
11865 }
11866
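      /* Validate and apply interrupt coalescing parameters.  The _irq
       * and stats-block limits stay zero on 5705+ chips, so nonzero
       * requests for those fields are rejected there.  Reachable from
       * userspace via, e.g., "ethtool -C eth0 rx-usecs 50".
       */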
11867 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11868 {
11869         struct tg3 *tp = netdev_priv(dev);
11870         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11871         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11872
11873         if (!tg3_flag(tp, 5705_PLUS)) {
11874                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11875                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11876                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11877                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11878         }
11879
11880         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11881             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11882             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11883             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11884             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11885             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11886             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11887             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11888             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11889             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11890                 return -EINVAL;
11891
11892         /* No rx interrupts will be generated if both are zero */
11893         if ((ec->rx_coalesce_usecs == 0) &&
11894             (ec->rx_max_coalesced_frames == 0))
11895                 return -EINVAL;
11896
11897         /* No tx interrupts will be generated if both are zero */
11898         if ((ec->tx_coalesce_usecs == 0) &&
11899             (ec->tx_max_coalesced_frames == 0))
11900                 return -EINVAL;
11901
11902         /* Only copy relevant parameters, ignore all others. */
11903         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11904         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11905         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11906         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11907         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11908         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11909         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11910         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11911         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11912
11913         if (netif_running(dev)) {
11914                 tg3_full_lock(tp, 0);
11915                 __tg3_set_coalesce(tp, &tp->coal);
11916                 tg3_full_unlock(tp);
11917         }
11918         return 0;
11919 }
11920
11921 static const struct ethtool_ops tg3_ethtool_ops = {
11922         .get_settings           = tg3_get_settings,
11923         .set_settings           = tg3_set_settings,
11924         .get_drvinfo            = tg3_get_drvinfo,
11925         .get_regs_len           = tg3_get_regs_len,
11926         .get_regs               = tg3_get_regs,
11927         .get_wol                = tg3_get_wol,
11928         .set_wol                = tg3_set_wol,
11929         .get_msglevel           = tg3_get_msglevel,
11930         .set_msglevel           = tg3_set_msglevel,
11931         .nway_reset             = tg3_nway_reset,
11932         .get_link               = ethtool_op_get_link,
11933         .get_eeprom_len         = tg3_get_eeprom_len,
11934         .get_eeprom             = tg3_get_eeprom,
11935         .set_eeprom             = tg3_set_eeprom,
11936         .get_ringparam          = tg3_get_ringparam,
11937         .set_ringparam          = tg3_set_ringparam,
11938         .get_pauseparam         = tg3_get_pauseparam,
11939         .set_pauseparam         = tg3_set_pauseparam,
11940         .self_test              = tg3_self_test,
11941         .get_strings            = tg3_get_strings,
11942         .set_phys_id            = tg3_set_phys_id,
11943         .get_ethtool_stats      = tg3_get_ethtool_stats,
11944         .get_coalesce           = tg3_get_coalesce,
11945         .set_coalesce           = tg3_set_coalesce,
11946         .get_sset_count         = tg3_get_sset_count,
11947 };
11948
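      /* Probe the size of a SEEPROM-style part; used as the fallback
       * from tg3_get_nvram_size() for non-standard (selfboot) formats.
       */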
11949 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11950 {
11951         u32 cursize, val, magic;
11952
11953         tp->nvram_size = EEPROM_CHIP_SIZE;
11954
11955         if (tg3_nvram_read(tp, 0, &magic) != 0)
11956                 return;
11957
11958         if ((magic != TG3_EEPROM_MAGIC) &&
11959             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11960             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11961                 return;
11962
11963         /*
11964          * Size the chip by reading offsets at increasing powers of two.
11965          * When we encounter our validation signature, we know the addressing
11966          * has wrapped around, and thus have our chip size.
11967          */
11968         cursize = 0x10;
11969
11970         while (cursize < tp->nvram_size) {
11971                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11972                         return;
11973
11974                 if (val == magic)
11975                         break;
11976
11977                 cursize <<= 1;
11978         }
11979
11980         tp->nvram_size = cursize;
11981 }
11982
11983 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11984 {
11985         u32 val;
11986
11987         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11988                 return;
11989
11990         /* Selfboot format */
11991         if (val != TG3_EEPROM_MAGIC) {
11992                 tg3_get_eeprom_size(tp);
11993                 return;
11994         }
11995
11996         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11997                 if (val != 0) {
11998                         /* This is confusing.  We want to operate on the
11999                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12000                          * call will read from NVRAM and byteswap the data
12001                          * according to the byteswapping settings for all
12002                          * other register accesses.  This ensures the data we
12003                          * want will always reside in the lower 16-bits.
12004                          * However, the data in NVRAM is in LE format, which
12005                          * means the data from the NVRAM read will always be
12006                          * opposite the endianness of the CPU.  The 16-bit
12007                          * byteswap then brings the data to CPU endianness.
12008                          */
12009                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12010                         return;
12011                 }
12012         }
12013         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12014 }
12015
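      /* The routines below decode NVRAM_CFG1 into vendor, page size and
       * device size, one helper per ASIC generation, since the strap
       * encodings differ from chip to chip.
       */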
12016 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12017 {
12018         u32 nvcfg1;
12019
12020         nvcfg1 = tr32(NVRAM_CFG1);
12021         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12022                 tg3_flag_set(tp, FLASH);
12023         } else {
12024                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12025                 tw32(NVRAM_CFG1, nvcfg1);
12026         }
12027
12028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12029             tg3_flag(tp, 5780_CLASS)) {
12030                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12031                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12032                         tp->nvram_jedecnum = JEDEC_ATMEL;
12033                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12034                         tg3_flag_set(tp, NVRAM_BUFFERED);
12035                         break;
12036                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12037                         tp->nvram_jedecnum = JEDEC_ATMEL;
12038                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12039                         break;
12040                 case FLASH_VENDOR_ATMEL_EEPROM:
12041                         tp->nvram_jedecnum = JEDEC_ATMEL;
12042                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12043                         tg3_flag_set(tp, NVRAM_BUFFERED);
12044                         break;
12045                 case FLASH_VENDOR_ST:
12046                         tp->nvram_jedecnum = JEDEC_ST;
12047                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12048                         tg3_flag_set(tp, NVRAM_BUFFERED);
12049                         break;
12050                 case FLASH_VENDOR_SAIFUN:
12051                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12052                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12053                         break;
12054                 case FLASH_VENDOR_SST_SMALL:
12055                 case FLASH_VENDOR_SST_LARGE:
12056                         tp->nvram_jedecnum = JEDEC_SST;
12057                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12058                         break;
12059                 }
12060         } else {
12061                 tp->nvram_jedecnum = JEDEC_ATMEL;
12062                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12063                 tg3_flag_set(tp, NVRAM_BUFFERED);
12064         }
12065 }
12066
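      /* Decode the 5752-style page-size field of NVRAM_CFG1, used by
       * several of the newer per-chip NVRAM helpers.
       */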
12067 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12068 {
12069         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12070         case FLASH_5752PAGE_SIZE_256:
12071                 tp->nvram_pagesize = 256;
12072                 break;
12073         case FLASH_5752PAGE_SIZE_512:
12074                 tp->nvram_pagesize = 512;
12075                 break;
12076         case FLASH_5752PAGE_SIZE_1K:
12077                 tp->nvram_pagesize = 1024;
12078                 break;
12079         case FLASH_5752PAGE_SIZE_2K:
12080                 tp->nvram_pagesize = 2048;
12081                 break;
12082         case FLASH_5752PAGE_SIZE_4K:
12083                 tp->nvram_pagesize = 4096;
12084                 break;
12085         case FLASH_5752PAGE_SIZE_264:
12086                 tp->nvram_pagesize = 264;
12087                 break;
12088         case FLASH_5752PAGE_SIZE_528:
12089                 tp->nvram_pagesize = 528;
12090                 break;
12091         }
12092 }
12093
12094 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12095 {
12096         u32 nvcfg1;
12097
12098         nvcfg1 = tr32(NVRAM_CFG1);
12099
12100         /* NVRAM protection for TPM */
12101         if (nvcfg1 & (1 << 27))
12102                 tg3_flag_set(tp, PROTECTED_NVRAM);
12103
12104         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12105         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12106         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12107                 tp->nvram_jedecnum = JEDEC_ATMEL;
12108                 tg3_flag_set(tp, NVRAM_BUFFERED);
12109                 break;
12110         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12111                 tp->nvram_jedecnum = JEDEC_ATMEL;
12112                 tg3_flag_set(tp, NVRAM_BUFFERED);
12113                 tg3_flag_set(tp, FLASH);
12114                 break;
12115         case FLASH_5752VENDOR_ST_M45PE10:
12116         case FLASH_5752VENDOR_ST_M45PE20:
12117         case FLASH_5752VENDOR_ST_M45PE40:
12118                 tp->nvram_jedecnum = JEDEC_ST;
12119                 tg3_flag_set(tp, NVRAM_BUFFERED);
12120                 tg3_flag_set(tp, FLASH);
12121                 break;
12122         }
12123
12124         if (tg3_flag(tp, FLASH)) {
12125                 tg3_nvram_get_pagesize(tp, nvcfg1);
12126         } else {
12127                 /* For EEPROMs, set pagesize to the maximum EEPROM size */
12128                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12129
12130                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12131                 tw32(NVRAM_CFG1, nvcfg1);
12132         }
12133 }
12134
12135 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12136 {
12137         u32 nvcfg1, protect = 0;
12138
12139         nvcfg1 = tr32(NVRAM_CFG1);
12140
12141         /* NVRAM protection for TPM */
12142         if (nvcfg1 & (1 << 27)) {
12143                 tg3_flag_set(tp, PROTECTED_NVRAM);
12144                 protect = 1;
12145         }
12146
12147         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12148         switch (nvcfg1) {
12149         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12150         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12151         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12152         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12153                 tp->nvram_jedecnum = JEDEC_ATMEL;
12154                 tg3_flag_set(tp, NVRAM_BUFFERED);
12155                 tg3_flag_set(tp, FLASH);
12156                 tp->nvram_pagesize = 264;
12157                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12158                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12159                         tp->nvram_size = (protect ? 0x3e200 :
12160                                           TG3_NVRAM_SIZE_512KB);
12161                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12162                         tp->nvram_size = (protect ? 0x1f200 :
12163                                           TG3_NVRAM_SIZE_256KB);
12164                 else
12165                         tp->nvram_size = (protect ? 0x1f200 :
12166                                           TG3_NVRAM_SIZE_128KB);
12167                 break;
12168         case FLASH_5752VENDOR_ST_M45PE10:
12169         case FLASH_5752VENDOR_ST_M45PE20:
12170         case FLASH_5752VENDOR_ST_M45PE40:
12171                 tp->nvram_jedecnum = JEDEC_ST;
12172                 tg3_flag_set(tp, NVRAM_BUFFERED);
12173                 tg3_flag_set(tp, FLASH);
12174                 tp->nvram_pagesize = 256;
12175                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12176                         tp->nvram_size = (protect ?
12177                                           TG3_NVRAM_SIZE_64KB :
12178                                           TG3_NVRAM_SIZE_128KB);
12179                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12180                         tp->nvram_size = (protect ?
12181                                           TG3_NVRAM_SIZE_64KB :
12182                                           TG3_NVRAM_SIZE_256KB);
12183                 else
12184                         tp->nvram_size = (protect ?
12185                                           TG3_NVRAM_SIZE_128KB :
12186                                           TG3_NVRAM_SIZE_512KB);
12187                 break;
12188         }
12189 }
12190
12191 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12192 {
12193         u32 nvcfg1;
12194
12195         nvcfg1 = tr32(NVRAM_CFG1);
12196
12197         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12198         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12199         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12200         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12201         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12202                 tp->nvram_jedecnum = JEDEC_ATMEL;
12203                 tg3_flag_set(tp, NVRAM_BUFFERED);
12204                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12205
12206                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12207                 tw32(NVRAM_CFG1, nvcfg1);
12208                 break;
12209         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12210         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12211         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12212         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12213                 tp->nvram_jedecnum = JEDEC_ATMEL;
12214                 tg3_flag_set(tp, NVRAM_BUFFERED);
12215                 tg3_flag_set(tp, FLASH);
12216                 tp->nvram_pagesize = 264;
12217                 break;
12218         case FLASH_5752VENDOR_ST_M45PE10:
12219         case FLASH_5752VENDOR_ST_M45PE20:
12220         case FLASH_5752VENDOR_ST_M45PE40:
12221                 tp->nvram_jedecnum = JEDEC_ST;
12222                 tg3_flag_set(tp, NVRAM_BUFFERED);
12223                 tg3_flag_set(tp, FLASH);
12224                 tp->nvram_pagesize = 256;
12225                 break;
12226         }
12227 }
12228
12229 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12230 {
12231         u32 nvcfg1, protect = 0;
12232
12233         nvcfg1 = tr32(NVRAM_CFG1);
12234
12235         /* NVRAM protection for TPM */
12236         if (nvcfg1 & (1 << 27)) {
12237                 tg3_flag_set(tp, PROTECTED_NVRAM);
12238                 protect = 1;
12239         }
12240
12241         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12242         switch (nvcfg1) {
12243         case FLASH_5761VENDOR_ATMEL_ADB021D:
12244         case FLASH_5761VENDOR_ATMEL_ADB041D:
12245         case FLASH_5761VENDOR_ATMEL_ADB081D:
12246         case FLASH_5761VENDOR_ATMEL_ADB161D:
12247         case FLASH_5761VENDOR_ATMEL_MDB021D:
12248         case FLASH_5761VENDOR_ATMEL_MDB041D:
12249         case FLASH_5761VENDOR_ATMEL_MDB081D:
12250         case FLASH_5761VENDOR_ATMEL_MDB161D:
12251                 tp->nvram_jedecnum = JEDEC_ATMEL;
12252                 tg3_flag_set(tp, NVRAM_BUFFERED);
12253                 tg3_flag_set(tp, FLASH);
12254                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12255                 tp->nvram_pagesize = 256;
12256                 break;
12257         case FLASH_5761VENDOR_ST_A_M45PE20:
12258         case FLASH_5761VENDOR_ST_A_M45PE40:
12259         case FLASH_5761VENDOR_ST_A_M45PE80:
12260         case FLASH_5761VENDOR_ST_A_M45PE16:
12261         case FLASH_5761VENDOR_ST_M_M45PE20:
12262         case FLASH_5761VENDOR_ST_M_M45PE40:
12263         case FLASH_5761VENDOR_ST_M_M45PE80:
12264         case FLASH_5761VENDOR_ST_M_M45PE16:
12265                 tp->nvram_jedecnum = JEDEC_ST;
12266                 tg3_flag_set(tp, NVRAM_BUFFERED);
12267                 tg3_flag_set(tp, FLASH);
12268                 tp->nvram_pagesize = 256;
12269                 break;
12270         }
12271
12272         if (protect) {
12273                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12274         } else {
12275                 switch (nvcfg1) {
12276                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12277                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12278                 case FLASH_5761VENDOR_ST_A_M45PE16:
12279                 case FLASH_5761VENDOR_ST_M_M45PE16:
12280                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12281                         break;
12282                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12283                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12284                 case FLASH_5761VENDOR_ST_A_M45PE80:
12285                 case FLASH_5761VENDOR_ST_M_M45PE80:
12286                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12287                         break;
12288                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12289                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12290                 case FLASH_5761VENDOR_ST_A_M45PE40:
12291                 case FLASH_5761VENDOR_ST_M_M45PE40:
12292                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12293                         break;
12294                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12295                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12296                 case FLASH_5761VENDOR_ST_A_M45PE20:
12297                 case FLASH_5761VENDOR_ST_M_M45PE20:
12298                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12299                         break;
12300                 }
12301         }
12302 }
12303
12304 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12305 {
12306         tp->nvram_jedecnum = JEDEC_ATMEL;
12307         tg3_flag_set(tp, NVRAM_BUFFERED);
12308         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12309 }
12310
12311 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12312 {
12313         u32 nvcfg1;
12314
12315         nvcfg1 = tr32(NVRAM_CFG1);
12316
12317         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12318         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12319         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12320                 tp->nvram_jedecnum = JEDEC_ATMEL;
12321                 tg3_flag_set(tp, NVRAM_BUFFERED);
12322                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12323
12324                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12325                 tw32(NVRAM_CFG1, nvcfg1);
12326                 return;
12327         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12328         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12329         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12330         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12331         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12332         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12333         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12334                 tp->nvram_jedecnum = JEDEC_ATMEL;
12335                 tg3_flag_set(tp, NVRAM_BUFFERED);
12336                 tg3_flag_set(tp, FLASH);
12337
12338                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12339                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12340                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12341                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12342                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12343                         break;
12344                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12345                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12346                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12347                         break;
12348                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12349                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12350                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12351                         break;
12352                 }
12353                 break;
12354         case FLASH_5752VENDOR_ST_M45PE10:
12355         case FLASH_5752VENDOR_ST_M45PE20:
12356         case FLASH_5752VENDOR_ST_M45PE40:
12357                 tp->nvram_jedecnum = JEDEC_ST;
12358                 tg3_flag_set(tp, NVRAM_BUFFERED);
12359                 tg3_flag_set(tp, FLASH);
12360
12361                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12362                 case FLASH_5752VENDOR_ST_M45PE10:
12363                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12364                         break;
12365                 case FLASH_5752VENDOR_ST_M45PE20:
12366                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12367                         break;
12368                 case FLASH_5752VENDOR_ST_M45PE40:
12369                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12370                         break;
12371                 }
12372                 break;
12373         default:
12374                 tg3_flag_set(tp, NO_NVRAM);
12375                 return;
12376         }
12377
12378         tg3_nvram_get_pagesize(tp, nvcfg1);
12379         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12380                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12381 }
12382
12383
12384 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12385 {
12386         u32 nvcfg1;
12387
12388         nvcfg1 = tr32(NVRAM_CFG1);
12389
12390         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12391         case FLASH_5717VENDOR_ATMEL_EEPROM:
12392         case FLASH_5717VENDOR_MICRO_EEPROM:
12393                 tp->nvram_jedecnum = JEDEC_ATMEL;
12394                 tg3_flag_set(tp, NVRAM_BUFFERED);
12395                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12396
12397                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12398                 tw32(NVRAM_CFG1, nvcfg1);
12399                 return;
12400         case FLASH_5717VENDOR_ATMEL_MDB011D:
12401         case FLASH_5717VENDOR_ATMEL_ADB011B:
12402         case FLASH_5717VENDOR_ATMEL_ADB011D:
12403         case FLASH_5717VENDOR_ATMEL_MDB021D:
12404         case FLASH_5717VENDOR_ATMEL_ADB021B:
12405         case FLASH_5717VENDOR_ATMEL_ADB021D:
12406         case FLASH_5717VENDOR_ATMEL_45USPT:
12407                 tp->nvram_jedecnum = JEDEC_ATMEL;
12408                 tg3_flag_set(tp, NVRAM_BUFFERED);
12409                 tg3_flag_set(tp, FLASH);
12410
12411                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12412                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12413                         /* Detect size with tg3_get_nvram_size() */
12414                         break;
12415                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12416                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12417                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12418                         break;
12419                 default:
12420                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12421                         break;
12422                 }
12423                 break;
12424         case FLASH_5717VENDOR_ST_M_M25PE10:
12425         case FLASH_5717VENDOR_ST_A_M25PE10:
12426         case FLASH_5717VENDOR_ST_M_M45PE10:
12427         case FLASH_5717VENDOR_ST_A_M45PE10:
12428         case FLASH_5717VENDOR_ST_M_M25PE20:
12429         case FLASH_5717VENDOR_ST_A_M25PE20:
12430         case FLASH_5717VENDOR_ST_M_M45PE20:
12431         case FLASH_5717VENDOR_ST_A_M45PE20:
12432         case FLASH_5717VENDOR_ST_25USPT:
12433         case FLASH_5717VENDOR_ST_45USPT:
12434                 tp->nvram_jedecnum = JEDEC_ST;
12435                 tg3_flag_set(tp, NVRAM_BUFFERED);
12436                 tg3_flag_set(tp, FLASH);
12437
12438                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12439                 case FLASH_5717VENDOR_ST_M_M25PE20:
12440                 case FLASH_5717VENDOR_ST_M_M45PE20:
12441                         /* Detect size with tg3_get_nvram_size() */
12442                         break;
12443                 case FLASH_5717VENDOR_ST_A_M25PE20:
12444                 case FLASH_5717VENDOR_ST_A_M45PE20:
12445                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12446                         break;
12447                 default:
12448                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12449                         break;
12450                 }
12451                 break;
12452         default:
12453                 tg3_flag_set(tp, NO_NVRAM);
12454                 return;
12455         }
12456
12457         tg3_nvram_get_pagesize(tp, nvcfg1);
12458         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12459                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12460 }
12461
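/* Added note: the 5720 decode below also keys off NVRAM_CFG1, but first
 * isolates the pin-strap field into nvmpinstrp so the nested size
 * switches can reuse it without re-masking.
 */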
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

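/* Added note: tg3_nvram_init() resets the EEPROM state machine, enables
 * auto SEEPROM access, and then dispatches to the per-ASIC probe above
 * that matches the chip revision; 5700/5701 have no NVRAM interface and
 * fall back to plain EEPROM sizing.
 */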
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data in the opposite
		 * of native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().  The net
		 * effect is that swab32(be32_to_cpu(data)) equals the
		 * le32_to_cpu() interpretation of the same four bytes.
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
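/* Added note: unbuffered parts must be erased a page at a time, so this
 * path is read-modify-write: read the whole page into a bounce buffer,
 * merge the caller's bytes, erase the page, then rewrite it word by word
 * with FIRST/LAST framing at the page boundaries.
 */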
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
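/* Added note: buffered parts accept streamed writes, so no erase pass is
 * needed; the loop only has to assert NVRAM_CMD_FIRST at a page (or
 * transfer) start and NVRAM_CMD_LAST at a page end or on the final word.
 */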
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
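/* Added note: top-level write entry point.  It temporarily drops the
 * GPIO-based write protect if armed, then routes to the legacy EEPROM,
 * buffered, or unbuffered path under the NVRAM lock, with GRC write
 * enable raised around the operation.
 */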
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

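/* Added note: fallback mapping used when a board carries no usable EEPROM
 * signature; the PCI subsystem vendor/device pair identifies the board
 * well enough to pick the PHY.  A phy_id of 0 marks serdes boards (see
 * tg3_phy_probe() below).
 */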
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

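/* Added note: pull the boot-time hardware configuration that the bootcode
 * leaves in NIC SRAM (valid only when the signature magic checks out):
 * PHY id/type, LED mode, ASF/APE enables, WOL capability, and the various
 * RGMII/ASPM quirk flags.
 */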
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read from some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

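/* Added note: OTP (one-time-programmable) access is command driven; the
 * helper below kicks off a command and polls OTP_STATUS for completion
 * for up to 1 ms before giving up with -EBUSY.
 */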
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

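/* Added note: seed the link state with "advertise everything the PHY can
 * do" defaults; serdes parts advertise FIBRE instead of the 10/100 TP
 * modes, and 10/100-only PHYs skip the gigabit modes.
 */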
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

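/* Added note: PHY discovery order is: phylib if enabled, then the PHY ID
 * registers (unless ASF/APE firmware owns the PHY), then the EEPROM-
 * supplied value, and finally the subsystem-ID table above.
 */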
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area and, failing
		 * that, the hard-coded subsystem-ID table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* The 5401 DSP init sequence is issued a second time. */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

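/* Added note: walk the PCI VPD read-only section for the Dell vendor
 * firmware string (keyword V0, only when MFR_ID is "1028") and the board
 * part number (PN), with hard-coded part names as a last resort.
 */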
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp the VPD-supplied length so a corrupt field cannot
		 * overflow the fixed-size fw_ver buffer.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

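/* Added note: the helpers below assemble tp->fw_ver from whichever images
 * are present: bootcode (bc), selfboot (sb), hardware selfboot, and the
 * optional ASF management or APE DASH/NCSI firmware versions.
 */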
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

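/* Added note: dispatch on the NVRAM magic at offset 0 to pick the right
 * version parser; VPD-supplied versions (tp->fw_ver already non-empty)
 * suppress the management-firmware suffix.
 */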
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

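/* Added note: tg3_get_invariants() is the probe-time fixup routine; the
 * section below normalizes the chip revision id, including reading the
 * product-ID ASIC rev register on newer parts that no longer encode it
 * in MISC_HOST_CTRL.
 */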
13754 static int __devinit tg3_get_invariants(struct tg3 *tp)
13755 {
13756         u32 misc_ctrl_reg;
13757         u32 pci_state_reg, grc_misc_cfg;
13758         u32 val;
13759         u16 pci_cmd;
13760         int err;
13761
13762         /* Force memory write invalidate off.  If we leave it on,
13763          * then on 5700_BX chips we have to enable a workaround.
13764          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13765          * to match the cacheline size.  The Broadcom driver have this
13766          * workaround but turns MWI off all the times so never uses
13767          * it.  This seems to suggest that the workaround is insufficient.
13768          */
13769         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13770         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13771         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13772
13773         /* Important! -- Make sure register accesses are byteswapped
13774          * correctly.  Also, for those chips that require it, make
13775          * sure that indirect register accesses are enabled before
13776          * the first operation.
13777          */
13778         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13779                               &misc_ctrl_reg);
13780         tp->misc_host_ctrl |= (misc_ctrl_reg &
13781                                MISC_HOST_CTRL_CHIPREV);
13782         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13783                                tp->misc_host_ctrl);
13784
13785         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13786                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13788                 u32 prod_id_asic_rev;
13789
13790                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13791                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13792                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13793                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13794                         pci_read_config_dword(tp->pdev,
13795                                               TG3PCI_GEN2_PRODID_ASICREV,
13796                                               &prod_id_asic_rev);
13797                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13798                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13799                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13800                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13801                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13802                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13803                         pci_read_config_dword(tp->pdev,
13804                                               TG3PCI_GEN15_PRODID_ASICREV,
13805                                               &prod_id_asic_rev);
13806                 else
13807                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13808                                               &prod_id_asic_rev);
13809
13810                 tp->pci_chip_rev_id = prod_id_asic_rev;
13811         }
13812
13813         /* Wrong chip ID in 5752 A0. This code can be removed later
13814          * as A0 is not in production.
13815          */
13816         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13817                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13818
13819         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13820          * we need to disable memory and use configuration cycles
13821          * only to access all registers. The 5702/03 chips
13822          * can mistakenly decode the special cycles from the
13823          * ICH chipsets as memory write cycles, causing corruption
13824          * of register and memory space. Only certain ICH bridges
13825          * will drive special cycles with non-zero data during the
13826          * address phase which can fall within the 5703's address
13827          * range. This is not an ICH bug as the PCI spec allows
13828          * non-zero address during special cycles. However, only
13829          * these ICH bridges are known to drive non-zero addresses
13830          * during special cycles.
13831          *
13832          * Since special cycles do not cross PCI bridges, we only
13833          * enable this workaround if the 5703 is on the secondary
13834          * bus of these ICH bridges.
13835          */
13836         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13837             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13838                 static struct tg3_dev_id {
13839                         u32     vendor;
13840                         u32     device;
13841                         u32     rev;
13842                 } ich_chipsets[] = {
13843                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13844                           PCI_ANY_ID },
13845                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13846                           PCI_ANY_ID },
13847                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13848                           0xa },
13849                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13850                           PCI_ANY_ID },
13851                         { },
13852                 };
13853                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13854                 struct pci_dev *bridge = NULL;
13855
13856                 while (pci_id->vendor != 0) {
13857                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13858                                                 bridge);
13859                         if (!bridge) {
13860                                 pci_id++;
13861                                 continue;
13862                         }
13863                         if (pci_id->rev != PCI_ANY_ID) {
13864                                 if (bridge->revision > pci_id->rev)
13865                                         continue;
13866                         }
13867                         if (bridge->subordinate &&
13868                             (bridge->subordinate->number ==
13869                              tp->pdev->bus->number)) {
13870                                 tg3_flag_set(tp, ICH_WORKAROUND);
13871                                 pci_dev_put(bridge);
13872                                 break;
13873                         }
13874                 }
13875         }
13876
13877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13878                 static struct tg3_dev_id {
13879                         u32     vendor;
13880                         u32     device;
13881                 } bridge_chipsets[] = {
13882                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13883                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13884                         { },
13885                 };
13886                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13887                 struct pci_dev *bridge = NULL;
13888
13889                 while (pci_id->vendor != 0) {
13890                         bridge = pci_get_device(pci_id->vendor,
13891                                                 pci_id->device,
13892                                                 bridge);
13893                         if (!bridge) {
13894                                 pci_id++;
13895                                 continue;
13896                         }
13897                         if (bridge->subordinate &&
13898                             (bridge->subordinate->number <=
13899                              tp->pdev->bus->number) &&
13900                             (bridge->subordinate->subordinate >=
13901                              tp->pdev->bus->number)) {
13902                                 tg3_flag_set(tp, 5701_DMA_BUG);
13903                                 pci_dev_put(bridge);
13904                                 break;
13905                         }
13906                 }
13907         }
13908
13909         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13910          * DMA addresses > 40-bit. This bridge may have other additional
13911          * DMA addresses > 40-bit. This bridge may have other 57xx
13912          * devices behind it in some 4-port NIC designs, for example.
13913          * DMA workaround.
13914          */
13915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13917                 tg3_flag_set(tp, 5780_CLASS);
13918                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13919                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13920         } else {
13921                 struct pci_dev *bridge = NULL;
13922
13923                 do {
13924                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13925                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13926                                                 bridge);
13927                         if (bridge && bridge->subordinate &&
13928                             (bridge->subordinate->number <=
13929                              tp->pdev->bus->number) &&
13930                             (bridge->subordinate->subordinate >=
13931                              tp->pdev->bus->number)) {
13932                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13933                                 pci_dev_put(bridge);
13934                                 break;
13935                         }
13936                 } while (bridge);
13937         }
13938
13939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13941                 tp->pdev_peer = tg3_find_peer(tp);
13942
13943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13945             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13946                 tg3_flag_set(tp, 5717_PLUS);
13947
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13949             tg3_flag(tp, 5717_PLUS))
13950                 tg3_flag_set(tp, 57765_PLUS);
13951
13952         /* Intentionally exclude ASIC_REV_5906 */
13953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13954             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13955             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13956             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13959             tg3_flag(tp, 57765_PLUS))
13960                 tg3_flag_set(tp, 5755_PLUS);
13961
13962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13963             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13964             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13965             tg3_flag(tp, 5755_PLUS) ||
13966             tg3_flag(tp, 5780_CLASS))
13967                 tg3_flag_set(tp, 5750_PLUS);
13968
13969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13970             tg3_flag(tp, 5750_PLUS))
13971                 tg3_flag_set(tp, 5705_PLUS);
13972
13973         /* Determine TSO capabilities */
13974         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13975                 ; /* Do nothing. HW bug. */
13976         else if (tg3_flag(tp, 57765_PLUS))
13977                 tg3_flag_set(tp, HW_TSO_3);
13978         else if (tg3_flag(tp, 5755_PLUS) ||
13979                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13980                 tg3_flag_set(tp, HW_TSO_2);
13981         else if (tg3_flag(tp, 5750_PLUS)) {
13982                 tg3_flag_set(tp, HW_TSO_1);
13983                 tg3_flag_set(tp, TSO_BUG);
13984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13985                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13986                         tg3_flag_clear(tp, TSO_BUG);
13987         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13988                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13989                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13990                 tg3_flag_set(tp, TSO_BUG);
13991                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13992                         tp->fw_needed = FIRMWARE_TG3TSO5;
13993                 else
13994                         tp->fw_needed = FIRMWARE_TG3TSO;
13995         }
13996
13997         /* Selectively allow TSO based on operating conditions */
13998         if (tg3_flag(tp, HW_TSO_1) ||
13999             tg3_flag(tp, HW_TSO_2) ||
14000             tg3_flag(tp, HW_TSO_3) ||
14001             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
14002                 tg3_flag_set(tp, TSO_CAPABLE);
14003         else {
14004                 tg3_flag_clear(tp, TSO_CAPABLE);
14005                 tg3_flag_clear(tp, TSO_BUG);
14006                 tp->fw_needed = NULL;
14007         }
14008
14009         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14010                 tp->fw_needed = FIRMWARE_TG3;
14011
14012         tp->irq_max = 1;
14013
14014         if (tg3_flag(tp, 5750_PLUS)) {
14015                 tg3_flag_set(tp, SUPPORT_MSI);
14016                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14017                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14018                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14019                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14020                      tp->pdev_peer == tp->pdev))
14021                         tg3_flag_clear(tp, SUPPORT_MSI);
14022
14023                 if (tg3_flag(tp, 5755_PLUS) ||
14024                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14025                         tg3_flag_set(tp, 1SHOT_MSI);
14026                 }
14027
14028                 if (tg3_flag(tp, 57765_PLUS)) {
14029                         tg3_flag_set(tp, SUPPORT_MSIX);
14030                         tp->irq_max = TG3_IRQ_MAX_VECS;
14031                 }
14032         }
14033
14034         if (tg3_flag(tp, 5755_PLUS))
14035                 tg3_flag_set(tp, SHORT_DMA_BUG);
14036
14037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14038                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14039
14040         if (tg3_flag(tp, 5717_PLUS))
14041                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14042
14043         if (tg3_flag(tp, 57765_PLUS) &&
14044             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14045                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14046
14047         if (!tg3_flag(tp, 5705_PLUS) ||
14048             tg3_flag(tp, 5780_CLASS) ||
14049             tg3_flag(tp, USE_JUMBO_BDFLAG))
14050                 tg3_flag_set(tp, JUMBO_CAPABLE);
14051
14052         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14053                               &pci_state_reg);
14054
14055         if (pci_is_pcie(tp->pdev)) {
14056                 u16 lnkctl;
14057
14058                 tg3_flag_set(tp, PCI_EXPRESS);
14059
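                /* Request the largest PCIe read request size: 4096
                 * bytes by default, reduced to 2048 bytes on the
                 * 5719 and 5720.
                 */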
14060                 tp->pcie_readrq = 4096;
14061                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14062                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14063                         tp->pcie_readrq = 2048;
14064
14065                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14066
14067                 pci_read_config_word(tp->pdev,
14068                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14069                                      &lnkctl);
14070                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14071                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14072                             ASIC_REV_5906) {
14073                                 tg3_flag_clear(tp, HW_TSO_2);
14074                                 tg3_flag_clear(tp, TSO_CAPABLE);
14075                         }
14076                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14077                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14078                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14079                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14080                                 tg3_flag_set(tp, CLKREQ_BUG);
14081                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14082                         tg3_flag_set(tp, L1PLLPD_EN);
14083                 }
14084         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14085                 /* BCM5785 devices are effectively PCIe devices, and should
14086                  * follow PCIe codepaths, but do not have a PCIe capabilities
14087                  * section.
14088                  */
14089                 tg3_flag_set(tp, PCI_EXPRESS);
14090         } else if (!tg3_flag(tp, 5705_PLUS) ||
14091                    tg3_flag(tp, 5780_CLASS)) {
14092                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14093                 if (!tp->pcix_cap) {
14094                         dev_err(&tp->pdev->dev,
14095                                 "Cannot find PCI-X capability, aborting\n");
14096                         return -EIO;
14097                 }
14098
14099                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14100                         tg3_flag_set(tp, PCIX_MODE);
14101         }
14102
14103         /* If we have an AMD 762 or VIA K8T800 chipset, write
14104          * reordering to the mailbox registers done by the host
14105          * controller can cause major troubles.  We read back from
14106          * every mailbox register write to force the writes to be
14107          * posted to the chip in order.
14108          */
14109         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14110             !tg3_flag(tp, PCI_EXPRESS))
14111                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14112
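        /* A minimal sketch of the read-back flush this flag selects
         * (illustrative only; the real helper is
         * tg3_write_flush_reg32() elsewhere in this file):
         *
         *      static void flush_write32(struct tg3 *tp, u32 off, u32 val)
         *      {
         *              writel(val, tp->regs + off);
         *              readl(tp->regs + off);  // read back forces posting
         *      }
         */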
14113         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14114                              &tp->pci_cacheline_sz);
14115         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14116                              &tp->pci_lat_timer);
14117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14118             tp->pci_lat_timer < 64) {
14119                 tp->pci_lat_timer = 64;
14120                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14121                                       tp->pci_lat_timer);
14122         }
14123
14124         /* Important! -- It is critical that the PCI-X hw workaround
14125          * situation is decided before the first MMIO register access.
14126          */
14127         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14128                 /* 5700 BX chips need to have their TX producer index
14129                  * mailboxes written twice to work around a bug.
14130                  */
14131                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14132
14133                 /* If we are in PCI-X mode, enable register write workaround.
14134                  *
14135                  * The workaround is to use indirect register accesses
14136                  * for all chip writes except those to mailbox registers.
14137                  */
14138                 if (tg3_flag(tp, PCIX_MODE)) {
14139                         u32 pm_reg;
14140
14141                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14142
14143                         /* The chip can have its power management PCI config
14144                          * space registers clobbered due to this bug.
14145                          * So explicitly force the chip into D0 here.
14146                          */
14147                         pci_read_config_dword(tp->pdev,
14148                                               tp->pm_cap + PCI_PM_CTRL,
14149                                               &pm_reg);
14150                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14151                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14152                         pci_write_config_dword(tp->pdev,
14153                                                tp->pm_cap + PCI_PM_CTRL,
14154                                                pm_reg);
14155
14156                         /* Also, force SERR#/PERR# in PCI command. */
14157                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14158                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14159                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14160                 }
14161         }
14162
14163         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14164                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14165         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14166                 tg3_flag_set(tp, PCI_32BIT);
14167
14168         /* Chip-specific fixup from Broadcom driver */
14169         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14170             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14171                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14172                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14173         }
14174
14175         /* Default fast path register access methods */
14176         tp->read32 = tg3_read32;
14177         tp->write32 = tg3_write32;
14178         tp->read32_mbox = tg3_read32;
14179         tp->write32_mbox = tg3_write32;
14180         tp->write32_tx_mbox = tg3_write32;
14181         tp->write32_rx_mbox = tg3_write32;
14182
14183         /* Various workaround register access methods */
14184         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14185                 tp->write32 = tg3_write_indirect_reg32;
14186         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14187                  (tg3_flag(tp, PCI_EXPRESS) &&
14188                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14189                 /*
14190                  * Back to back register writes can cause problems on these
14191                  * chips, the workaround is to read back all reg writes
14192                  * except those to mailbox regs.
14193                  *
14194                  * See tg3_write_indirect_reg32().
14195                  */
14196                 tp->write32 = tg3_write_flush_reg32;
14197         }
14198
14199         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14200                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14201                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14202                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14203         }
14204
14205         if (tg3_flag(tp, ICH_WORKAROUND)) {
14206                 tp->read32 = tg3_read_indirect_reg32;
14207                 tp->write32 = tg3_write_indirect_reg32;
14208                 tp->read32_mbox = tg3_read_indirect_mbox;
14209                 tp->write32_mbox = tg3_write_indirect_mbox;
14210                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14211                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14212
14213                 iounmap(tp->regs);
14214                 tp->regs = NULL;
14215
14216                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14217                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14218                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14219         }
14220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14221                 tp->read32_mbox = tg3_read32_mbox_5906;
14222                 tp->write32_mbox = tg3_write32_mbox_5906;
14223                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14224                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14225         }
14226
14227         if (tp->write32 == tg3_write_indirect_reg32 ||
14228             (tg3_flag(tp, PCIX_MODE) &&
14229              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14230               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14231                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14232
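        /* With SRAM_USE_CONFIG set, SRAM words are reached through
         * the PCI config space window instead of MMIO.  A hedged
         * sketch of the access pattern (the real code lives in
         * tg3_read_mem() and tg3_write_mem()):
         *
         *      pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
         *      pci_read_config_dword(pdev, TG3PCI_MEM_WIN_DATA, &val);
         *      pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
         */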
14233         /* The memory arbiter has to be enabled in order for SRAM accesses
14234          * to succeed.  Normally on powerup the tg3 chip firmware will make
14235          * sure it is enabled, but other entities such as system netboot
14236          * code might disable it.
14237          */
14238         val = tr32(MEMARB_MODE);
14239         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14240
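        /* Start from the function number in PCI config space; some
         * multi-function chips report the true function elsewhere
         * (the PCI-X status register or the CPMU status word), so
         * the value is overridden below where applicable.
         */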
14241         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14242         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14243             tg3_flag(tp, 5780_CLASS)) {
14244                 if (tg3_flag(tp, PCIX_MODE)) {
14245                         pci_read_config_dword(tp->pdev,
14246                                               tp->pcix_cap + PCI_X_STATUS,
14247                                               &val);
14248                         tp->pci_fn = val & 0x7;
14249                 }
14250         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14251                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14252                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14253                     NIC_SRAM_CPMUSTAT_SIG) {
14254                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14255                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14256                 }
14257         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14258                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14259                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14260                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14261                     NIC_SRAM_CPMUSTAT_SIG) {
14262                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14263                                      TG3_CPMU_STATUS_FSHFT_5719;
14264                 }
14265         }
14266
14267         /* Get eeprom hw config before calling tg3_set_power_state().
14268          * In particular, the TG3_FLAG_IS_NIC flag must be
14269          * determined before calling tg3_set_power_state() so that
14270          * we know whether or not to switch out of Vaux power.
14271          * When the flag is set, it means that GPIO1 is used for eeprom
14272          * write protect and also implies that it is a LOM where GPIOs
14273          * are not used to switch power.
14274          */
14275         tg3_get_eeprom_hw_cfg(tp);
14276
14277         if (tg3_flag(tp, ENABLE_APE)) {
14278                 /* Allow reads and writes to the
14279                  * APE register and memory space.
14280                  */
14281                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14282                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14283                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14284                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14285                                        pci_state_reg);
14286
14287                 tg3_ape_lock_init(tp);
14288         }
14289
14290         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14291             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14292             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14294             tg3_flag(tp, 57765_PLUS))
14295                 tg3_flag_set(tp, CPMU_PRESENT);
14296
14297         /* Set up tp->grc_local_ctrl before calling
14298          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14299          * will bring 5700's external PHY out of reset.
14300          * It is also used as eeprom write protect on LOMs.
14301          */
14302         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14304             tg3_flag(tp, EEPROM_WRITE_PROT))
14305                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14306                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14307         /* Unused GPIO3 must be driven as output on 5752 because there
14308          * are no pull-up resistors on unused GPIO pins.
14309          */
14310         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14311                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14312
14313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14315             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14316                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14317
14318         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14319             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14320                 /* Turn off the debug UART. */
14321                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14322                 if (tg3_flag(tp, IS_NIC))
14323                         /* Keep VMain power. */
14324                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14325                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14326         }
14327
14328         /* Switch out of Vaux if it is a NIC */
14329         tg3_pwrsrc_switch_to_vmain(tp);
14330
14331         /* Derive initial jumbo mode from MTU assigned in
14332          * ether_setup() via the alloc_etherdev() call
14333          */
14334         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14335                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14336
14337         /* Determine WakeOnLan speed to use. */
14338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14339             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14340             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14341             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14342                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14343         } else {
14344                 tg3_flag_set(tp, WOL_SPEED_100MB);
14345         }
14346
14347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14348                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14349
14350         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14351         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14352             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14353              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14354              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14355             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14356             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14357                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14358
14359         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14360             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14361                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14362         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14363                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14364
14365         if (tg3_flag(tp, 5705_PLUS) &&
14366             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14367             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14368             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14369             !tg3_flag(tp, 57765_PLUS)) {
14370                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14371                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14372                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14373                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14374                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14375                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14376                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14377                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14378                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14379                 } else
14380                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14381         }
14382
14383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14384             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14385                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14386                 if (tp->phy_otp == 0)
14387                         tp->phy_otp = TG3_OTP_DEFAULT;
14388         }
14389
14390         if (tg3_flag(tp, CPMU_PRESENT))
14391                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14392         else
14393                 tp->mi_mode = MAC_MI_MODE_BASE;
14394
14395         tp->coalesce_mode = 0;
14396         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14397             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14398                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14399
14400         /* Set these bits to enable statistics workaround. */
14401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14402             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14403             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14404                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14405                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14406         }
14407
14408         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14409             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14410                 tg3_flag_set(tp, USE_PHYLIB);
14411
14412         err = tg3_mdio_init(tp);
14413         if (err)
14414                 return err;
14415
14416         /* Initialize data/descriptor byte/word swapping. */
14417         val = tr32(GRC_MODE);
14418         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14419                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14420                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14421                         GRC_MODE_B2HRX_ENABLE |
14422                         GRC_MODE_HTX2B_ENABLE |
14423                         GRC_MODE_HOST_STACKUP);
14424         else
14425                 val &= GRC_MODE_HOST_STACKUP;
14426
14427         tw32(GRC_MODE, val | tp->grc_mode);
14428
14429         tg3_switch_clocks(tp);
14430
14431         /* Clear this out for sanity. */
14432         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14433
14434         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14435                               &pci_state_reg);
14436         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14437             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14438                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14439
14440                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14441                     chiprevid == CHIPREV_ID_5701_B0 ||
14442                     chiprevid == CHIPREV_ID_5701_B2 ||
14443                     chiprevid == CHIPREV_ID_5701_B5) {
14444                         void __iomem *sram_base;
14445
14446                         /* Write some dummy words into the SRAM status block
14447                          * area and see if they read back correctly.  If the return
14448                          * value is bad, force enable the PCIX workaround.
14449                          */
14450                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14451
14452                         writel(0x00000000, sram_base);
14453                         writel(0x00000000, sram_base + 4);
14454                         writel(0xffffffff, sram_base + 4);
14455                         if (readl(sram_base) != 0x00000000)
14456                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14457                 }
14458         }
14459
14460         udelay(50);
14461         tg3_nvram_init(tp);
14462
14463         grc_misc_cfg = tr32(GRC_MISC_CFG);
14464         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14465
14466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14467             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14468              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14469                 tg3_flag_set(tp, IS_5788);
14470
14471         if (!tg3_flag(tp, IS_5788) &&
14472             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14473                 tg3_flag_set(tp, TAGGED_STATUS);
14474         if (tg3_flag(tp, TAGGED_STATUS)) {
14475                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14476                                       HOSTCC_MODE_CLRTICK_TXBD);
14477
14478                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14479                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14480                                        tp->misc_host_ctrl);
14481         }
14482
14483         /* Preserve the APE MAC_MODE bits */
14484         if (tg3_flag(tp, ENABLE_APE))
14485                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14486         else
14487                 tp->mac_mode = 0;
14488
14489         /* these are limited to 10/100 only */
14490         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14491              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14492             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14493              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14494              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14495               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14496               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14497             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14498              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14499               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14500               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14501             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14502             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14503             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14504             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14505                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14506
14507         err = tg3_phy_probe(tp);
14508         if (err) {
14509                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14510                 /* ... but do not return immediately ... */
14511                 tg3_mdio_fini(tp);
14512         }
14513
14514         tg3_read_vpd(tp);
14515         tg3_read_fw_ver(tp);
14516
14517         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14518                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14519         } else {
14520                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14521                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14522                 else
14523                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14524         }
14525
14526         /* 5700 {AX,BX} chips have a broken status block link
14527          * change bit implementation, so we must use the
14528          * status register in those cases.
14529          */
14530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14531                 tg3_flag_set(tp, USE_LINKCHG_REG);
14532         else
14533                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14534
14535         /* The led_ctrl is set during tg3_phy_probe; here we might
14536          * have to force the link status polling mechanism based
14537          * upon subsystem IDs.
14538          */
14539         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14540             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14541             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14542                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14543                 tg3_flag_set(tp, USE_LINKCHG_REG);
14544         }
14545
14546         /* For all SERDES we poll the MAC status register. */
14547         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14548                 tg3_flag_set(tp, POLL_SERDES);
14549         else
14550                 tg3_flag_clear(tp, POLL_SERDES);
14551
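        /* The 5701 in PCI-X mode cannot DMA receive packets at a
         * two byte offset, so the IP header alignment adjustment is
         * skipped there; on architectures without efficient
         * unaligned access, the copy threshold is raised to its
         * maximum so every packet is copied into an aligned buffer.
         */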
14552         tp->rx_offset = NET_IP_ALIGN;
14553         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14555             tg3_flag(tp, PCIX_MODE)) {
14556                 tp->rx_offset = 0;
14557 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14558                 tp->rx_copy_thresh = ~(u16)0;
14559 #endif
14560         }
14561
14562         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14563         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14564         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14565
14566         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14567
14568         /* Increment the rx prod index on the rx std ring by at most
14569          * 8 for these chips to workaround hw errata.
14570          * 8 for these chips to work around hw errata.
14571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14573             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14574                 tp->rx_std_max_post = 8;
14575
14576         if (tg3_flag(tp, ASPM_WORKAROUND))
14577                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14578                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14579
14580         return err;
14581 }
14582
14583 #ifdef CONFIG_SPARC
14584 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14585 {
14586         struct net_device *dev = tp->dev;
14587         struct pci_dev *pdev = tp->pdev;
14588         struct device_node *dp = pci_device_to_OF_node(pdev);
14589         const unsigned char *addr;
14590         int len;
14591
14592         addr = of_get_property(dp, "local-mac-address", &len);
14593         if (addr && len == 6) {
14594                 memcpy(dev->dev_addr, addr, 6);
14595                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14596                 return 0;
14597         }
14598         return -ENODEV;
14599 }
14600
14601 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14602 {
14603         struct net_device *dev = tp->dev;
14604
14605         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14606         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14607         return 0;
14608 }
14609 #endif
14610
14611 static int __devinit tg3_get_device_address(struct tg3 *tp)
14612 {
14613         struct net_device *dev = tp->dev;
14614         u32 hi, lo, mac_offset;
14615         int addr_ok = 0;
14616
14617 #ifdef CONFIG_SPARC
14618         if (!tg3_get_macaddr_sparc(tp))
14619                 return 0;
14620 #endif
14621
14622         mac_offset = 0x7c;
14623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14624             tg3_flag(tp, 5780_CLASS)) {
14625                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14626                         mac_offset = 0xcc;
14627                 if (tg3_nvram_lock(tp))
14628                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14629                 else
14630                         tg3_nvram_unlock(tp);
14631         } else if (tg3_flag(tp, 5717_PLUS)) {
14632                 if (tp->pci_fn & 1)
14633                         mac_offset = 0xcc;
14634                 if (tp->pci_fn > 1)
14635                         mac_offset += 0x18c;
14636         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14637                 mac_offset = 0x10;
14638
14639         /* First try to get it from MAC address mailbox. */
14640         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
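        /* 0x484b is ASCII "HK", apparently the marker bootcode
         * leaves in the mailbox when a valid MAC address has been
         * stored there.
         */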
14641         if ((hi >> 16) == 0x484b) {
14642                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14643                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14644
14645                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14646                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14647                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14648                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14649                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14650
14651                 /* Some old bootcode may report a 0 MAC address in SRAM */
14652                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14653         }
14654         if (!addr_ok) {
14655                 /* Next, try NVRAM. */
14656                 if (!tg3_flag(tp, NO_NVRAM) &&
14657                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14658                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14659                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14660                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14661                 }
14662                 /* Finally just fetch it out of the MAC control regs. */
14663                 else {
14664                         hi = tr32(MAC_ADDR_0_HIGH);
14665                         lo = tr32(MAC_ADDR_0_LOW);
14666
14667                         dev->dev_addr[5] = lo & 0xff;
14668                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14669                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14670                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14671                         dev->dev_addr[1] = hi & 0xff;
14672                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14673                 }
14674         }
14675
14676         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14677 #ifdef CONFIG_SPARC
14678                 if (!tg3_get_default_macaddr_sparc(tp))
14679                         return 0;
14680 #endif
14681                 return -EINVAL;
14682         }
14683         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14684         return 0;
14685 }
14686
14687 #define BOUNDARY_SINGLE_CACHELINE       1
14688 #define BOUNDARY_MULTI_CACHELINE        2
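/* Goal values for tg3_calc_dma_bndry(): constrain DMA bursts to a
 * single cacheline, allow bursts across multiple cachelines, or 0
 * for no constraint at all.
 */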
14689
14690 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14691 {
14692         int cacheline_size;
14693         u8 byte;
14694         int goal;
14695
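        /* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence
         * the multiply by four below; a value of zero means the
         * size is unknown, so fall back to 1024 bytes.
         */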
14696         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14697         if (byte == 0)
14698                 cacheline_size = 1024;
14699         else
14700                 cacheline_size = (int) byte * 4;
14701
14702         /* On 5703 and later chips, the boundary bits have no
14703          * effect.
14704          */
14705         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14706             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14707             !tg3_flag(tp, PCI_EXPRESS))
14708                 goto out;
14709
14710 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14711         goal = BOUNDARY_MULTI_CACHELINE;
14712 #else
14713 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14714         goal = BOUNDARY_SINGLE_CACHELINE;
14715 #else
14716         goal = 0;
14717 #endif
14718 #endif
14719
14720         if (tg3_flag(tp, 57765_PLUS)) {
14721                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14722                 goto out;
14723         }
14724
14725         if (!goal)
14726                 goto out;
14727
14728         /* PCI controllers on most RISC systems tend to disconnect
14729          * when a device tries to burst across a cache-line boundary.
14730          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14731          *
14732          * Unfortunately, for PCI-E there are only limited
14733          * write-side controls for this, and thus for reads
14734          * we will still get the disconnects.  We'll also waste
14735          * these PCI cycles for both read and write for chips
14736          * other than 5700 and 5701 which do not implement the
14737          * boundary bits.
14738          */
14739         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14740                 switch (cacheline_size) {
14741                 case 16:
14742                 case 32:
14743                 case 64:
14744                 case 128:
14745                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14746                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14747                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14748                         } else {
14749                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14750                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14751                         }
14752                         break;
14753
14754                 case 256:
14755                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14756                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14757                         break;
14758
14759                 default:
14760                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14761                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14762                         break;
14763                 }
14764         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14765                 switch (cacheline_size) {
14766                 case 16:
14767                 case 32:
14768                 case 64:
14769                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14770                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14771                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14772                                 break;
14773                         }
14774                         /* fallthrough */
14775                 case 128:
14776                 default:
14777                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14778                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14779                         break;
14780                 }
14781         } else {
14782                 switch (cacheline_size) {
14783                 case 16:
14784                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14785                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14786                                         DMA_RWCTRL_WRITE_BNDRY_16);
14787                                 break;
14788                         }
14789                         /* fallthrough */
14790                 case 32:
14791                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14792                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14793                                         DMA_RWCTRL_WRITE_BNDRY_32);
14794                                 break;
14795                         }
14796                         /* fallthrough */
14797                 case 64:
14798                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14799                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14800                                         DMA_RWCTRL_WRITE_BNDRY_64);
14801                                 break;
14802                         }
14803                         /* fallthrough */
14804                 case 128:
14805                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14806                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14807                                         DMA_RWCTRL_WRITE_BNDRY_128);
14808                                 break;
14809                         }
14810                         /* fallthrough */
14811                 case 256:
14812                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14813                                 DMA_RWCTRL_WRITE_BNDRY_256);
14814                         break;
14815                 case 512:
14816                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14817                                 DMA_RWCTRL_WRITE_BNDRY_512);
14818                         break;
14819                 case 1024:
14820                 default:
14821                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14822                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14823                         break;
14824                 }
14825         }
14826
14827 out:
14828         return val;
14829 }
14830
14831 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14832 {
14833         struct tg3_internal_buffer_desc test_desc;
14834         u32 sram_dma_descs;
14835         int i, ret;
14836
14837         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14838
14839         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14840         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14841         tw32(RDMAC_STATUS, 0);
14842         tw32(WDMAC_STATUS, 0);
14843
14844         tw32(BUFMGR_MODE, 0);
14845         tw32(FTQ_RESET, 0);
14846
14847         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14848         test_desc.addr_lo = buf_dma & 0xffffffff;
14849         test_desc.nic_mbuf = 0x00002100;
14850         test_desc.len = size;
14851
14852         /*
14853          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14854          * the *second* time the tg3 driver was getting loaded after an
14855          * initial scan.
14856          *
14857          * Broadcom tells me:
14858          *   ...the DMA engine is connected to the GRC block and a DMA
14859          *   reset may affect the GRC block in some unpredictable way...
14860          *   The behavior of resets to individual blocks has not been tested.
14861          *
14862          * Broadcom noted the GRC reset will also reset all sub-components.
14863          */
14864         if (to_device) {
14865                 test_desc.cqid_sqid = (13 << 8) | 2;
14866
14867                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14868                 udelay(40);
14869         } else {
14870                 test_desc.cqid_sqid = (16 << 8) | 7;
14871
14872                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14873                 udelay(40);
14874         }
14875         test_desc.flags = 0x00000005;
14876
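        /* Copy the test descriptor into NIC SRAM one 32-bit word at
         * a time through the PCI config space memory window.
         */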
14877         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14878                 u32 val;
14879
14880                 val = *(((u32 *)&test_desc) + i);
14881                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14882                                        sram_dma_descs + (i * sizeof(u32)));
14883                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14884         }
14885         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14886
14887         if (to_device)
14888                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14889         else
14890                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14891
14892         ret = -ENODEV;
14893         for (i = 0; i < 40; i++) {
14894                 u32 val;
14895
14896                 if (to_device)
14897                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14898                 else
14899                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14900                 if ((val & 0xffff) == sram_dma_descs) {
14901                         ret = 0;
14902                         break;
14903                 }
14904
14905                 udelay(100);
14906         }
14907
14908         return ret;
14909 }
14910
14911 #define TEST_BUFFER_SIZE        0x2000
14912
14913 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14914         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14915         { },
14916 };
14917
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

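	/* DMA a known pattern to the chip and back, verifying the result.
	 * On corruption, retry once with the write boundary forced to 16
	 * bytes; if that also fails, give up.
	 */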
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

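/* Pick buffer manager watermark defaults appropriate to the chip family;
 * the 57765 and 5705 classes use different MBUF pool thresholds than the
 * original 5700-series parts.
 */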
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

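	/* The two ports of a 5704 appear as two PCI functions in the
	 * same slot; scan all eight functions for the other one.
	 */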
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* A 5704 can be configured in single-port mode; set the peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

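/* Fill in the default ethtool interrupt coalescing parameters, using the
 * CLRTICKS variants when the host coalescing mode clears ticks on BD
 * events, and zeroing the per-IRQ and statistics coalescing settings on
 * 5705+ chips.
 */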
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly; otherwise the DMA self test would enable WDMAC
	 * and we would see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
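	/* Assign per-vector interrupt, consumer, and producer mailboxes.
	 * The first interrupt mailboxes are spaced 8 bytes apart; from
	 * the sixth vector on they are packed 4 bytes apart (hence the
	 * i <= 4 test below).
	 */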
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

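		/* Receive return ring consumer mailboxes are 8 bytes
		 * apart.  The send producer mailboxes are not contiguous;
		 * the alternating -0x4/+0xc step below walks through them.
		 */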
		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

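	/* Prepare the chip for power-down.  On failure, restart the
	 * hardware, re-arm the timer, and reattach the netdev so the
	 * device stays usable.
	 */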
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

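	/* Re-enable bus mastering and restore the config space saved at
	 * probe time, then save it again for any future recovery pass.
	 */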
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);