/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
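
/* Usage sketch (illustrative, not part of the original source): all
 * feature-flag tests and updates go through the wrappers above, e.g.
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, WOL_ENABLE);
 */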

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     132
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 21, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
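
/* Worked example: with TG3_TX_RING_SIZE == 512, NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0; masking with (512 - 1) is equivalent to '% 512'
 * precisely because the ring size is a power of two.
 */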

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
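
/* Example (illustrative): the debug bitmap can also be set at load time,
 *
 *      modprobe tg3 tg3_debug=0x7
 *
 * which enables the NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK
 * classes from TG3_DEF_MSG_ENABLE above; -1 keeps the full default set.
 */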

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
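
/* These strings are what ethtool reports back to userspace; running the
 * self-tests from a shell (illustrative; 'eth0' is a placeholder name):
 *
 *      ethtool -t eth0 offline
 *
 * yields one result line per entry of ethtool_test_keys[].
 */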


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
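
/* The indirect accessors above use a register-window technique: the
 * target offset is written to a window register in PCI config space
 * (TG3PCI_REG_BASE_ADDR) and the data then moves through a companion
 * data register (TG3PCI_REG_DATA).  indirect_lock serializes the
 * two-step sequence so concurrent callers cannot clobber each other's
 * window setup.
 */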

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
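
/* Note that these accessor macros expect a variable named 'tp' to be in
 * scope at the call site.  Typical uses seen later in this file:
 *
 *      tw32_f(MAC_MI_MODE, tp->mi_mode);        write, flush by readback
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40); write, flush, wait 40 usec
 */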

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
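
/* tg3_write_mem()/tg3_read_mem() apply the same window-register pattern
 * to NIC-internal SRAM: the SRAM offset goes into
 * TG3PCI_MEM_WIN_BASE_ADDR and data moves through TG3PCI_MEM_WIN_DATA,
 * after which the window base is restored to zero per the invariant
 * noted in the comments above.
 */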

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
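                /* Fall through on non-5761 chips: the GPIO lock then uses
                 * the same per-function request bit as the GRC and MEM
                 * locks below.
                 */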
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
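                /* Fall through on non-5761 chips, mirroring tg3_ape_lock(). */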
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
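
/* Typical lock/unlock pairing (illustrative; this is exactly the pattern
 * tg3_ape_event_lock() below uses):
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...access APE shared memory...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */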

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
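
/* Illustrative call, mirroring tg3_mdio_init() below: read the PHY's
 * basic mode control register and reset the PHY if it is powered down:
 *
 *      u32 reg;
 *
 *      if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 *              tg3_bmcr_reset(tp);
 */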

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
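
/* The cl45 helpers above implement clause-45 (MMD) register access
 * tunneled through clause-22 MDIO: the MMD device address (devad) is
 * selected via MII_TG3_MMD_CTRL, the register address is latched through
 * MII_TG3_MMD_ADDRESS, and that same register is reused for the data
 * transfer once no-increment data mode (MII_TG3_MMD_CTRL_DATA_NOINC) is
 * selected.
 */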

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1362
1363 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1364 {
1365         struct tg3 *tp = bp->priv;
1366         u32 val;
1367
1368         spin_lock_bh(&tp->lock);
1369
1370         if (tg3_readphy(tp, reg, &val))
1371                 val = -EIO;
1372
1373         spin_unlock_bh(&tp->lock);
1374
1375         return val;
1376 }
1377
1378 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1379 {
1380         struct tg3 *tp = bp->priv;
1381         u32 ret = 0;
1382
1383         spin_lock_bh(&tp->lock);
1384
1385         if (tg3_writephy(tp, reg, val))
1386                 ret = -EIO;
1387
1388         spin_unlock_bh(&tp->lock);
1389
1390         return ret;
1391 }
1392
1393 static int tg3_mdio_reset(struct mii_bus *bp)
1394 {
1395         return 0;
1396 }
1397
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1399 {
1400         u32 val;
1401         struct phy_device *phydev;
1402
1403         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1404         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405         case PHY_ID_BCM50610:
1406         case PHY_ID_BCM50610M:
1407                 val = MAC_PHYCFG2_50610_LED_MODES;
1408                 break;
1409         case PHY_ID_BCMAC131:
1410                 val = MAC_PHYCFG2_AC131_LED_MODES;
1411                 break;
1412         case PHY_ID_RTL8211C:
1413                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1414                 break;
1415         case PHY_ID_RTL8201E:
1416                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1417                 break;
1418         default:
1419                 return;
1420         }
1421
1422         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423                 tw32(MAC_PHYCFG2, val);
1424
1425                 val = tr32(MAC_PHYCFG1);
1426                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429                 tw32(MAC_PHYCFG1, val);
1430
1431                 return;
1432         }
1433
1434         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436                        MAC_PHYCFG2_FMODE_MASK_MASK |
1437                        MAC_PHYCFG2_GMODE_MASK_MASK |
1438                        MAC_PHYCFG2_ACT_MASK_MASK   |
1439                        MAC_PHYCFG2_QUAL_MASK_MASK |
1440                        MAC_PHYCFG2_INBAND_ENABLE;
1441
1442         tw32(MAC_PHYCFG2, val);
1443
1444         val = tr32(MAC_PHYCFG1);
1445         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1452         }
1453         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455         tw32(MAC_PHYCFG1, val);
1456
1457         val = tr32(MAC_EXT_RGMII_MODE);
1458         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459                  MAC_RGMII_MODE_RX_QUALITY |
1460                  MAC_RGMII_MODE_RX_ACTIVITY |
1461                  MAC_RGMII_MODE_RX_ENG_DET |
1462                  MAC_RGMII_MODE_TX_ENABLE |
1463                  MAC_RGMII_MODE_TX_LOWPWR |
1464                  MAC_RGMII_MODE_TX_RESET);
1465         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467                         val |= MAC_RGMII_MODE_RX_INT_B |
1468                                MAC_RGMII_MODE_RX_QUALITY |
1469                                MAC_RGMII_MODE_RX_ACTIVITY |
1470                                MAC_RGMII_MODE_RX_ENG_DET;
1471                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472                         val |= MAC_RGMII_MODE_TX_ENABLE |
1473                                MAC_RGMII_MODE_TX_LOWPWR |
1474                                MAC_RGMII_MODE_TX_RESET;
1475         }
1476         tw32(MAC_EXT_RGMII_MODE, val);
1477 }
1478
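/* Take the MAC out of MI auto-polling mode so the MDIO bus can be
 * driven directly, and reapply the 5785 MAC/PHY configuration if
 * the bus has already been registered.
 */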
1479 static void tg3_mdio_start(struct tg3 *tp)
1480 {
1481         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482         tw32_f(MAC_MI_MODE, tp->mi_mode);
1483         udelay(80);
1484
1485         if (tg3_flag(tp, MDIOBUS_INITED) &&
1486             tg3_asic_rev(tp) == ASIC_REV_5785)
1487                 tg3_mdio_config_5785(tp);
1488 }
1489
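/* Determine the PHY address (5717+ parts map one PHY per PCI
 * function, offset by 7 for serdes), then, when phylib is in use,
 * allocate and register an MDIO bus and set up PHY-specific
 * dev_flags and interface modes.
 */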
1490 static int tg3_mdio_init(struct tg3 *tp)
1491 {
1492         int i;
1493         u32 reg;
1494         struct phy_device *phydev;
1495
1496         if (tg3_flag(tp, 5717_PLUS)) {
1497                 u32 is_serdes;
1498
1499                 tp->phy_addr = tp->pci_fn + 1;
1500
1501                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1503                 else
1504                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1506                 if (is_serdes)
1507                         tp->phy_addr += 7;
1508         } else
1509                 tp->phy_addr = TG3_PHY_MII_ADDR;
1510
1511         tg3_mdio_start(tp);
1512
1513         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1514                 return 0;
1515
1516         tp->mdio_bus = mdiobus_alloc();
1517         if (tp->mdio_bus == NULL)
1518                 return -ENOMEM;
1519
1520         tp->mdio_bus->name     = "tg3 mdio bus";
1521         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1522                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1523         tp->mdio_bus->priv     = tp;
1524         tp->mdio_bus->parent   = &tp->pdev->dev;
1525         tp->mdio_bus->read     = &tg3_mdio_read;
1526         tp->mdio_bus->write    = &tg3_mdio_write;
1527         tp->mdio_bus->reset    = &tg3_mdio_reset;
1528         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1529         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1530
1531         for (i = 0; i < PHY_MAX_ADDR; i++)
1532                 tp->mdio_bus->irq[i] = PHY_POLL;
1533
1534         /* The bus registration will look for all the PHYs on the mdio bus.
1535          * Unfortunately, it does not ensure the PHY is powered up before
1536          * accessing the PHY ID registers.  A chip reset is the
1537          * quickest way to bring the device back to an operational state.
1538          */
1539         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1540                 tg3_bmcr_reset(tp);
1541
1542         i = mdiobus_register(tp->mdio_bus);
1543         if (i) {
1544                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1545                 mdiobus_free(tp->mdio_bus);
1546                 return i;
1547         }
1548
1549         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1550
1551         if (!phydev || !phydev->drv) {
1552                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1553                 mdiobus_unregister(tp->mdio_bus);
1554                 mdiobus_free(tp->mdio_bus);
1555                 return -ENODEV;
1556         }
1557
1558         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1559         case PHY_ID_BCM57780:
1560                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1561                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1562                 break;
1563         case PHY_ID_BCM50610:
1564         case PHY_ID_BCM50610M:
1565                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1566                                      PHY_BRCM_RX_REFCLK_UNUSED |
1567                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1568                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1569                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1570                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1571                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1572                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1573                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1574                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1575                 /* fallthru */
1576         case PHY_ID_RTL8211C:
1577                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1578                 break;
1579         case PHY_ID_RTL8201E:
1580         case PHY_ID_BCMAC131:
1581                 phydev->interface = PHY_INTERFACE_MODE_MII;
1582                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584                 break;
1585         }
1586
1587         tg3_flag_set(tp, MDIOBUS_INITED);
1588
1589         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1590                 tg3_mdio_config_5785(tp);
1591
1592         return 0;
1593 }
1594
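/* Tear down the MDIO bus registered by tg3_mdio_init(). */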
1595 static void tg3_mdio_fini(struct tg3 *tp)
1596 {
1597         if (tg3_flag(tp, MDIOBUS_INITED)) {
1598                 tg3_flag_clear(tp, MDIOBUS_INITED);
1599                 mdiobus_unregister(tp->mdio_bus);
1600                 mdiobus_free(tp->mdio_bus);
1601         }
1602 }
1603
1604 /* tp->lock is held. */
1605 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 {
1607         u32 val;
1608
1609         val = tr32(GRC_RX_CPU_EVENT);
1610         val |= GRC_RX_CPU_DRIVER_EVENT;
1611         tw32_f(GRC_RX_CPU_EVENT, val);
1612
1613         tp->last_event_jiffies = jiffies;
1614 }
1615
1616 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1617
1618 /* tp->lock is held. */
1619 static void tg3_wait_for_event_ack(struct tg3 *tp)
1620 {
1621         int i;
1622         unsigned int delay_cnt;
1623         long time_remain;
1624
1625         /* If enough time has passed, no wait is necessary. */
1626         time_remain = (long)(tp->last_event_jiffies + 1 +
1627                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1628                       (long)jiffies;
1629         if (time_remain < 0)
1630                 return;
1631
1632         /* Check if we can shorten the wait time. */
1633         delay_cnt = jiffies_to_usecs(time_remain);
1634         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1635                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1636         delay_cnt = (delay_cnt >> 3) + 1;
1637
1638         for (i = 0; i < delay_cnt; i++) {
1639                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1640                         break;
1641                 if (pci_channel_offline(tp->pdev))
1642                         break;
1643
1644                 udelay(8);
1645         }
1646 }
1647
1648 /* tp->lock is held. */
1649 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1650 {
1651         u32 reg, val;
1652
1653         val = 0;
1654         if (!tg3_readphy(tp, MII_BMCR, &reg))
1655                 val = reg << 16;
1656         if (!tg3_readphy(tp, MII_BMSR, &reg))
1657                 val |= (reg & 0xffff);
1658         *data++ = val;
1659
1660         val = 0;
1661         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1662                 val = reg << 16;
1663         if (!tg3_readphy(tp, MII_LPA, &reg))
1664                 val |= (reg & 0xffff);
1665         *data++ = val;
1666
1667         val = 0;
1668         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1669                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1670                         val = reg << 16;
1671                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1672                         val |= (reg & 0xffff);
1673         }
1674         *data++ = val;
1675
1676         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1677                 val = reg << 16;
1678         else
1679                 val = 0;
1680         *data++ = val;
1681 }
1682
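/* Report the current PHY state (BMCR/BMSR, advertised and link
 * partner abilities, 1000BASE-T status) to the management firmware
 * through the NIC SRAM mailbox so side-band management can track
 * link changes.
 */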
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3 *tp)
1685 {
1686         u32 data[4];
1687
1688         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1689                 return;
1690
1691         tg3_phy_gather_ump_data(tp, data);
1692
1693         tg3_wait_for_event_ack(tp);
1694
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1698         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1699         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1700         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1701
1702         tg3_generate_fw_event(tp);
1703 }
1704
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3 *tp)
1707 {
1708         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1709                 /* Wait for RX cpu to ACK the previous event. */
1710                 tg3_wait_for_event_ack(tp);
1711
1712                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1713
1714                 tg3_generate_fw_event(tp);
1715
1716                 /* Wait for RX cpu to ACK this event. */
1717                 tg3_wait_for_event_ack(tp);
1718         }
1719 }
1720
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1723 {
1724         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1725                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1726
1727         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1728                 switch (kind) {
1729                 case RESET_KIND_INIT:
1730                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1731                                       DRV_STATE_START);
1732                         break;
1733
1734                 case RESET_KIND_SHUTDOWN:
1735                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736                                       DRV_STATE_UNLOAD);
1737                         break;
1738
1739                 case RESET_KIND_SUSPEND:
1740                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741                                       DRV_STATE_SUSPEND);
1742                         break;
1743
1744                 default:
1745                         break;
1746                 }
1747         }
1748
1749         if (kind == RESET_KIND_INIT ||
1750             kind == RESET_KIND_SUSPEND)
1751                 tg3_ape_driver_state_change(tp, kind);
1752 }
1753
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1756 {
1757         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1758                 switch (kind) {
1759                 case RESET_KIND_INIT:
1760                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761                                       DRV_STATE_START_DONE);
1762                         break;
1763
1764                 case RESET_KIND_SHUTDOWN:
1765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766                                       DRV_STATE_UNLOAD_DONE);
1767                         break;
1768
1769                 default:
1770                         break;
1771                 }
1772         }
1773
1774         if (kind == RESET_KIND_SHUTDOWN)
1775                 tg3_ape_driver_state_change(tp, kind);
1776 }
1777
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1780 {
1781         if (tg3_flag(tp, ENABLE_ASF)) {
1782                 switch (kind) {
1783                 case RESET_KIND_INIT:
1784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785                                       DRV_STATE_START);
1786                         break;
1787
1788                 case RESET_KIND_SHUTDOWN:
1789                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790                                       DRV_STATE_UNLOAD);
1791                         break;
1792
1793                 case RESET_KIND_SUSPEND:
1794                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795                                       DRV_STATE_SUSPEND);
1796                         break;
1797
1798                 default:
1799                         break;
1800                 }
1801         }
1802 }
1803
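/* Wait for the bootcode to complete initialization.  The firmware
 * signals completion by writing ~MAGIC1 into the firmware mailbox
 * (or by setting VCPU init-done on the 5906).  Parts without
 * firmware are not treated as an error.
 */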
1804 static int tg3_poll_fw(struct tg3 *tp)
1805 {
1806         int i;
1807         u32 val;
1808
1809         if (tg3_flag(tp, NO_FWARE_REPORTED))
1810                 return 0;
1811
1812         if (tg3_flag(tp, IS_SSB_CORE)) {
1813                 /* We don't use firmware. */
1814                 return 0;
1815         }
1816
1817         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1818                 /* Wait up to 20ms for init done. */
1819                 for (i = 0; i < 200; i++) {
1820                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1821                                 return 0;
1822                         if (pci_channel_offline(tp->pdev))
1823                                 return -ENODEV;
1824
1825                         udelay(100);
1826                 }
1827                 return -ENODEV;
1828         }
1829
1830         /* Wait for firmware initialization to complete. */
1831         for (i = 0; i < 100000; i++) {
1832                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1833                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1834                         break;
1835                 if (pci_channel_offline(tp->pdev)) {
1836                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838                                 netdev_info(tp->dev, "No firmware running\n");
1839                         }
1840
1841                         break;
1842                 }
1843
1844                 udelay(10);
1845         }
1846
1847         /* Chip might not be fitted with firmware.  Some Sun onboard
1848          * parts are configured like that.  So don't signal the timeout
1849          * of the above loop as an error, but do report the lack of
1850          * running firmware once.
1851          */
1852         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1853                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1854
1855                 netdev_info(tp->dev, "No firmware running\n");
1856         }
1857
1858         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1859                 /* The 57765 A0 needs a little more
1860                  * time to do some important work.
1861                  */
1862                 mdelay(10);
1863         }
1864
1865         return 0;
1866 }
1867
1868 static void tg3_link_report(struct tg3 *tp)
1869 {
1870         if (!netif_carrier_ok(tp->dev)) {
1871                 netif_info(tp, link, tp->dev, "Link is down\n");
1872                 tg3_ump_link_report(tp);
1873         } else if (netif_msg_link(tp)) {
1874                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1875                             (tp->link_config.active_speed == SPEED_1000 ?
1876                              1000 :
1877                              (tp->link_config.active_speed == SPEED_100 ?
1878                               100 : 10)),
1879                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1880                              "full" : "half"));
1881
1882                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1883                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1884                             "on" : "off",
1885                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1886                             "on" : "off");
1887
1888                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1889                         netdev_info(tp->dev, "EEE is %s\n",
1890                                     tp->setlpicnt ? "enabled" : "disabled");
1891
1892                 tg3_ump_link_report(tp);
1893         }
1894
1895         tp->link_up = netif_carrier_ok(tp->dev);
1896 }
1897
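/* Decode a copper pause advertisement into FLOW_CTRL_{RX,TX} bits:
 * PAUSE_CAP grants receive pause and, without PAUSE_ASYM, transmit
 * pause as well; PAUSE_ASYM alone grants transmit pause only.
 */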
1898 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1899 {
1900         u32 flowctrl = 0;
1901
1902         if (adv & ADVERTISE_PAUSE_CAP) {
1903                 flowctrl |= FLOW_CTRL_RX;
1904                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1905                         flowctrl |= FLOW_CTRL_TX;
1906         } else if (adv & ADVERTISE_PAUSE_ASYM)
1907                 flowctrl |= FLOW_CTRL_TX;
1908
1909         return flowctrl;
1910 }
1911
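/* Encode FLOW_CTRL_{RX,TX} bits into a 1000BASE-X pause
 * advertisement (IEEE 802.3 Annex 28B): symmetric pause for TX+RX,
 * asymmetric only for TX-only, and both bits for RX-only.
 */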
1912 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1913 {
1914         u16 miireg;
1915
1916         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1917                 miireg = ADVERTISE_1000XPAUSE;
1918         else if (flow_ctrl & FLOW_CTRL_TX)
1919                 miireg = ADVERTISE_1000XPSE_ASYM;
1920         else if (flow_ctrl & FLOW_CTRL_RX)
1921                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1922         else
1923                 miireg = 0;
1924
1925         return miireg;
1926 }
1927
1928 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1929 {
1930         u32 flowctrl = 0;
1931
1932         if (adv & ADVERTISE_1000XPAUSE) {
1933                 flowctrl |= FLOW_CTRL_RX;
1934                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1935                         flowctrl |= FLOW_CTRL_TX;
1936         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1937                 flowctrl |= FLOW_CTRL_TX;
1938
1939         return flowctrl;
1940 }
1941
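/* Resolve the negotiated 1000BASE-X pause configuration from the
 * local and remote advertisements, following the pause resolution
 * rules of IEEE 802.3 Annex 28B.
 */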
1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1943 {
1944         u8 cap = 0;
1945
1946         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949                 if (lcladv & ADVERTISE_1000XPAUSE)
1950                         cap = FLOW_CTRL_RX;
1951                 if (rmtadv & ADVERTISE_1000XPAUSE)
1952                         cap = FLOW_CTRL_TX;
1953         }
1954
1955         return cap;
1956 }
1957
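/* Apply the resolved (or forced) flow control settings to the MAC,
 * toggling the RX/TX flow control enables and only rewriting mode
 * registers that actually changed.
 */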
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1959 {
1960         u8 autoneg;
1961         u8 flowctrl = 0;
1962         u32 old_rx_mode = tp->rx_mode;
1963         u32 old_tx_mode = tp->tx_mode;
1964
1965         if (tg3_flag(tp, USE_PHYLIB))
1966                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1967         else
1968                 autoneg = tp->link_config.autoneg;
1969
1970         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1973                 else
1974                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1975         } else
1976                 flowctrl = tp->link_config.flowctrl;
1977
1978         tp->link_config.active_flowctrl = flowctrl;
1979
1980         if (flowctrl & FLOW_CTRL_RX)
1981                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1982         else
1983                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1984
1985         if (old_rx_mode != tp->rx_mode)
1986                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1987
1988         if (flowctrl & FLOW_CTRL_TX)
1989                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1990         else
1991                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1992
1993         if (old_tx_mode != tp->tx_mode)
1994                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1995 }
1996
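/* phylib link-change callback: mirror the PHY's speed, duplex and
 * pause state into the MAC mode and TX length registers, and log a
 * link message whenever something user-visible changed.
 */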
1997 static void tg3_adjust_link(struct net_device *dev)
1998 {
1999         u8 oldflowctrl, linkmesg = 0;
2000         u32 mac_mode, lcl_adv, rmt_adv;
2001         struct tg3 *tp = netdev_priv(dev);
2002         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003
2004         spin_lock_bh(&tp->lock);
2005
2006         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007                                     MAC_MODE_HALF_DUPLEX);
2008
2009         oldflowctrl = tp->link_config.active_flowctrl;
2010
2011         if (phydev->link) {
2012                 lcl_adv = 0;
2013                 rmt_adv = 0;
2014
2015                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2017                 else if (phydev->speed == SPEED_1000 ||
2018                          tg3_asic_rev(tp) != ASIC_REV_5785)
2019                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2020                 else
2021                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2022
2023                 if (phydev->duplex == DUPLEX_HALF)
2024                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2025                 else {
2026                         lcl_adv = mii_advertise_flowctrl(
2027                                   tp->link_config.flowctrl);
2028
2029                         if (phydev->pause)
2030                                 rmt_adv = LPA_PAUSE_CAP;
2031                         if (phydev->asym_pause)
2032                                 rmt_adv |= LPA_PAUSE_ASYM;
2033                 }
2034
2035                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2036         } else
2037                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2038
2039         if (mac_mode != tp->mac_mode) {
2040                 tp->mac_mode = mac_mode;
2041                 tw32_f(MAC_MODE, tp->mac_mode);
2042                 udelay(40);
2043         }
2044
2045         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046                 if (phydev->speed == SPEED_10)
2047                         tw32(MAC_MI_STAT,
2048                              MAC_MI_STAT_10MBPS_MODE |
2049                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2050                 else
2051                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052         }
2053
2054         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055                 tw32(MAC_TX_LENGTHS,
2056                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057                       (6 << TX_LENGTHS_IPG_SHIFT) |
2058                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2059         else
2060                 tw32(MAC_TX_LENGTHS,
2061                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062                       (6 << TX_LENGTHS_IPG_SHIFT) |
2063                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064
2065         if (phydev->link != tp->old_link ||
2066             phydev->speed != tp->link_config.active_speed ||
2067             phydev->duplex != tp->link_config.active_duplex ||
2068             oldflowctrl != tp->link_config.active_flowctrl)
2069                 linkmesg = 1;
2070
2071         tp->old_link = phydev->link;
2072         tp->link_config.active_speed = phydev->speed;
2073         tp->link_config.active_duplex = phydev->duplex;
2074
2075         spin_unlock_bh(&tp->lock);
2076
2077         if (linkmesg)
2078                 tg3_link_report(tp);
2079 }
2080
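/* Reset the PHY to a known state and connect the MAC to it through
 * phylib, trimming the advertised feature set down to what the MAC
 * supports on this interface mode.
 */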
2081 static int tg3_phy_init(struct tg3 *tp)
2082 {
2083         struct phy_device *phydev;
2084
2085         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2086                 return 0;
2087
2088         /* Bring the PHY back to a known state. */
2089         tg3_bmcr_reset(tp);
2090
2091         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2092
2093         /* Attach the MAC to the PHY. */
2094         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095                              tg3_adjust_link, phydev->interface);
2096         if (IS_ERR(phydev)) {
2097                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098                 return PTR_ERR(phydev);
2099         }
2100
2101         /* Mask with MAC supported features. */
2102         switch (phydev->interface) {
2103         case PHY_INTERFACE_MODE_GMII:
2104         case PHY_INTERFACE_MODE_RGMII:
2105                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106                         phydev->supported &= (PHY_GBIT_FEATURES |
2107                                               SUPPORTED_Pause |
2108                                               SUPPORTED_Asym_Pause);
2109                         break;
2110                 }
2111                 /* fallthru */
2112         case PHY_INTERFACE_MODE_MII:
2113                 phydev->supported &= (PHY_BASIC_FEATURES |
2114                                       SUPPORTED_Pause |
2115                                       SUPPORTED_Asym_Pause);
2116                 break;
2117         default:
2118                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2119                 return -EINVAL;
2120         }
2121
2122         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124         phydev->advertising = phydev->supported;
2125
2126         return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131         struct phy_device *phydev;
2132
2133         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134                 return;
2135
2136         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2137
2138         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140                 phydev->speed = tp->link_config.speed;
2141                 phydev->duplex = tp->link_config.duplex;
2142                 phydev->autoneg = tp->link_config.autoneg;
2143                 phydev->advertising = tp->link_config.advertising;
2144         }
2145
2146         phy_start(phydev);
2147
2148         phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154                 return;
2155
2156         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164         }
2165 }
2166
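/* Enable external loopback via the AUXCTL shadow register.  The
 * 5401 gets a full-register write because it cannot do
 * read-modify-write.
 */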
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169         int err;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173                 return 0;
2174
2175         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176                 /* Cannot do read-modify-write on 5401 */
2177                 err = tg3_phy_auxctl_write(tp,
2178                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180                                            0x4c20);
2181                 goto done;
2182         }
2183
2184         err = tg3_phy_auxctl_read(tp,
2185                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186         if (err)
2187                 return err;
2188
2189         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190         err = tg3_phy_auxctl_write(tp,
2191                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194         return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199         u32 phytest;
2200
2201         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202                 u32 phy;
2203
2204                 tg3_writephy(tp, MII_TG3_FET_TEST,
2205                              phytest | MII_TG3_FET_SHADOW_EN);
2206                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207                         if (enable)
2208                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209                         else
2210                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212                 }
2213                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214         }
2215 }
2216
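/* Enable or disable the PHY's auto power-down (APD) feature via the
 * MISC shadow registers, or the FET shadow registers on FET-style
 * PHYs.
 */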
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219         u32 reg;
2220
2221         if (!tg3_flag(tp, 5705_PLUS) ||
2222             (tg3_flag(tp, 5717_PLUS) &&
2223              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224                 return;
2225
2226         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227                 tg3_phy_fet_toggle_apd(tp, enable);
2228                 return;
2229         }
2230
2231         reg = MII_TG3_MISC_SHDW_WREN |
2232               MII_TG3_MISC_SHDW_SCR5_SEL |
2233               MII_TG3_MISC_SHDW_SCR5_LPED |
2234               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235               MII_TG3_MISC_SHDW_SCR5_SDTL |
2236               MII_TG3_MISC_SHDW_SCR5_C125OE;
2237         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2241
2242
2244               MII_TG3_MISC_SHDW_APD_SEL |
2245               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246         if (enable)
2247                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2250 }
2251
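/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs;
 * serdes links have no MDI pairs to swap.
 */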
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254         u32 phy;
2255
2256         if (!tg3_flag(tp, 5705_PLUS) ||
2257             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258                 return;
2259
2260         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261                 u32 ephy;
2262
2263                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266                         tg3_writephy(tp, MII_TG3_FET_TEST,
2267                                      ephy | MII_TG3_FET_SHADOW_EN);
2268                         if (!tg3_readphy(tp, reg, &phy)) {
2269                                 if (enable)
2270                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271                                 else
2272                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 tg3_writephy(tp, reg, phy);
2274                         }
2275                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276                 }
2277         } else {
2278                 int ret;
2279
2280                 ret = tg3_phy_auxctl_read(tp,
2281                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282                 if (!ret) {
2283                         if (enable)
2284                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285                         else
2286                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         tg3_phy_auxctl_write(tp,
2288                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289                 }
2290         }
2291 }
2292
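/* Enable Broadcom's "ethernet@wirespeed" feature, which lets the
 * PHY downshift to a lower speed when the cabling cannot sustain a
 * gigabit link.
 */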
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295         int ret;
2296         u32 val;
2297
2298         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299                 return;
2300
2301         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302         if (!ret)
2303                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
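/* Distribute the factory-programmed OTP word across the PHY DSP
 * coefficient registers (AGC target, filters, VDAC, amplitude and
 * offset trims) through the DSP access registers.
 */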
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309         u32 otp, phy;
2310
2311         if (!tp->phy_otp)
2312                 return;
2313
2314         otp = tp->phy_otp;
2315
2316         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317                 return;
2318
2319         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341         tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
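/* Re-evaluate EEE after a link change: program the LPI exit timer
 * for the active speed and only count down to LPI entry when the
 * link partner resolved EEE at 100TX or 1000T full duplex.
 */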
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345 {
2346         u32 val;
2347
2348         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349                 return;
2350
2351         tp->setlpicnt = 0;
2352
2353         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2354             current_link_up &&
2355             tp->link_config.active_duplex == DUPLEX_FULL &&
2356             (tp->link_config.active_speed == SPEED_100 ||
2357              tp->link_config.active_speed == SPEED_1000)) {
2358                 u32 eeectl;
2359
2360                 if (tp->link_config.active_speed == SPEED_1000)
2361                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2362                 else
2363                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2364
2365                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366
2367                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368                                   TG3_CL45_D7_EEERES_STAT, &val);
2369
2370                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372                         tp->setlpicnt = 2;
2373         }
2374
2375         if (!tp->setlpicnt) {
2376                 if (current_link_up &&
2377                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2380                 }
2381
2382                 val = tr32(TG3_CPMU_EEE_MODE);
2383                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2384         }
2385 }
2386
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2388 {
2389         u32 val;
2390
2391         if (tp->link_config.active_speed == SPEED_1000 &&
2392             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394              tg3_flag(tp, 57765_CLASS)) &&
2395             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396                 val = MII_TG3_DSP_TAP26_ALNOKO |
2397                       MII_TG3_DSP_TAP26_RMRXSTO;
2398                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400         }
2401
2402         val = tr32(TG3_CPMU_EEE_MODE);
2403         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2404 }
2405
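/* Poll the DSP control register until the macro busy bit (0x1000)
 * clears, returning -EBUSY on timeout.
 */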
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2407 {
2408         int limit = 100;
2409
2410         while (limit--) {
2411                 u32 tmp32;
2412
2413                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414                         if ((tmp32 & 0x1000) == 0)
2415                                 break;
2416                 }
2417         }
2418         if (limit < 0)
2419                 return -EBUSY;
2420
2421         return 0;
2422 }
2423
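/* Write a known test pattern into each of the four DSP channels and
 * read it back.  Any mismatch or macro timeout asks the caller to
 * reset the PHY and retry.
 */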
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2425 {
2426         static const u32 test_pat[4][6] = {
2427         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2431         };
2432         int chan;
2433
2434         for (chan = 0; chan < 4; chan++) {
2435                 int i;
2436
2437                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438                              (chan * 0x2000) | 0x0200);
2439                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2440
2441                 for (i = 0; i < 6; i++)
2442                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2443                                      test_pat[chan][i]);
2444
2445                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446                 if (tg3_wait_macro_done(tp)) {
2447                         *resetp = 1;
2448                         return -EBUSY;
2449                 }
2450
2451                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452                              (chan * 0x2000) | 0x0200);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454                 if (tg3_wait_macro_done(tp)) {
2455                         *resetp = 1;
2456                         return -EBUSY;
2457                 }
2458
2459                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460                 if (tg3_wait_macro_done(tp)) {
2461                         *resetp = 1;
2462                         return -EBUSY;
2463                 }
2464
2465                 for (i = 0; i < 6; i += 2) {
2466                         u32 low, high;
2467
2468                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470                             tg3_wait_macro_done(tp)) {
2471                                 *resetp = 1;
2472                                 return -EBUSY;
2473                         }
2474                         low &= 0x7fff;
2475                         high &= 0x000f;
2476                         if (low != test_pat[chan][i] ||
2477                             high != test_pat[chan][i+1]) {
2478                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2481
2482                                 return -EBUSY;
2483                         }
2484                 }
2485         }
2486
2487         return 0;
2488 }
2489
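/* Clear the test pattern from all four DSP channels. */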
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2491 {
2492         int chan;
2493
2494         for (chan = 0; chan < 4; chan++) {
2495                 int i;
2496
2497                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498                              (chan * 0x2000) | 0x0200);
2499                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500                 for (i = 0; i < 6; i++)
2501                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2502                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503                 if (tg3_wait_macro_done(tp))
2504                         return -EBUSY;
2505         }
2506
2507         return 0;
2508 }
2509
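/* PHY reset workaround for the 5703, 5704 and 5705: force a 1Gbps
 * full-duplex master configuration, exercise the DSP test pattern
 * until it reads back clean, then restore the original PHY state.
 */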
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2511 {
2512         u32 reg32, phy9_orig;
2513         int retries, do_phy_reset, err;
2514
2515         retries = 10;
2516         do_phy_reset = 1;
2517         do {
2518                 if (do_phy_reset) {
2519                         err = tg3_bmcr_reset(tp);
2520                         if (err)
2521                                 return err;
2522                         do_phy_reset = 0;
2523                 }
2524
2525                 /* Disable transmitter and interrupt.  */
2526                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2527                         continue;
2528
2529                 reg32 |= 0x3000;
2530                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2531
2532                 /* Set full-duplex, 1000 Mbps.  */
2533                 tg3_writephy(tp, MII_BMCR,
2534                              BMCR_FULLDPLX | BMCR_SPEED1000);
2535
2536                 /* Set to master mode.  */
2537                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2538                         continue;
2539
2540                 tg3_writephy(tp, MII_CTRL1000,
2541                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2542
2543                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2544                 if (err)
2545                         return err;
2546
2547                 /* Block the PHY control access.  */
2548                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2549
2550                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2551                 if (!err)
2552                         break;
2553         } while (--retries);
2554
2555         err = tg3_phy_reset_chanpat(tp);
2556         if (err)
2557                 return err;
2558
2559         tg3_phydsp_write(tp, 0x8005, 0x0000);
2560
2561         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2563
2564         tg3_phy_toggle_auxctl_smdsp(tp, false);
2565
2566         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2567
2568         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2569                 reg32 &= ~0x3000;
2570                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571         } else if (!err)
2572                 err = -EBUSY;
2573
2574         return err;
2575 }
2576
2577 static void tg3_carrier_off(struct tg3 *tp)
2578 {
2579         netif_carrier_off(tp->dev);
2580         tp->link_up = false;
2581 }
2582
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2584 {
2585         if (tg3_flag(tp, ENABLE_ASF))
2586                 netdev_warn(tp->dev,
2587                             "Management side-band traffic will be interrupted during phy settings change\n");
2588 }
2589
2590 /* Reset the tigon3 PHY and apply any chip- and PHY-specific
2591  * workarounds needed after the reset.
2592  */
2593 static int tg3_phy_reset(struct tg3 *tp)
2594 {
2595         u32 val, cpmuctrl;
2596         int err;
2597
2598         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599                 val = tr32(GRC_MISC_CFG);
2600                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2601                 udelay(40);
2602         }
2603         err  = tg3_readphy(tp, MII_BMSR, &val);
2604         err |= tg3_readphy(tp, MII_BMSR, &val);
2605         if (err != 0)
2606                 return -EBUSY;
2607
2608         if (netif_running(tp->dev) && tp->link_up) {
2609                 netif_carrier_off(tp->dev);
2610                 tg3_link_report(tp);
2611         }
2612
2613         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615             tg3_asic_rev(tp) == ASIC_REV_5705) {
2616                 err = tg3_phy_reset_5703_4_5(tp);
2617                 if (err)
2618                         return err;
2619                 goto out;
2620         }
2621
2622         cpmuctrl = 0;
2623         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2627                         tw32(TG3_CPMU_CTRL,
2628                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2629         }
2630
2631         err = tg3_bmcr_reset(tp);
2632         if (err)
2633                 return err;
2634
2635         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2638
2639                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2640         }
2641
2642         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2647                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2648                         udelay(40);
2649                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2650                 }
2651         }
2652
2653         if (tg3_flag(tp, 5717_PLUS) &&
2654             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2655                 return 0;
2656
2657         tg3_phy_apply_otp(tp);
2658
2659         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660                 tg3_phy_toggle_apd(tp, true);
2661         else
2662                 tg3_phy_toggle_apd(tp, false);
2663
2664 out:
2665         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2670         }
2671
2672         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2675         }
2676
2677         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2680                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2681                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2683                 }
2684         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689                                 tg3_writephy(tp, MII_TG3_TEST1,
2690                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2691                         } else
2692                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2693
2694                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2695                 }
2696         }
2697
2698         /* Set Extended packet length bit (bit 14) on all chips that
2699          * support jumbo frames.  */
2700         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701                 /* Cannot do read-modify-write on 5401 */
2702                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704                 /* Set bit 14 with read-modify-write to preserve other bits */
2705                 err = tg3_phy_auxctl_read(tp,
2706                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2707                 if (!err)
2708                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2710         }
2711
2712         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2713          * jumbo frames transmission.
2714          */
2715         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2719         }
2720
2721         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722                 /* adjust output voltage */
2723                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2724         }
2725
2726         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2728
2729         tg3_phy_toggle_automdix(tp, true);
2730         tg3_phy_set_wirespeed(tp);
2731         return 0;
2732 }
2733
2734 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2736 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2737                                           TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742          (TG3_GPIO_MSG_DRVR_PRES << 12))
2743
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748          (TG3_GPIO_MSG_NEED_VAUX << 12))
2749
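/* Update this PCI function's 4-bit nibble in the shared GPIO status
 * word (held in the APE scratchpad on 5717/5719, in CPMU_DRV_STATUS
 * otherwise) and return the combined status of all functions.
 */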
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2751 {
2752         u32 status, shift;
2753
2754         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5719)
2756                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2757         else
2758                 status = tr32(TG3_CPMU_DRV_STATUS);
2759
2760         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761         status &= ~(TG3_GPIO_MSG_MASK << shift);
2762         status |= (newstat << shift);
2763
2764         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765             tg3_asic_rev(tp) == ASIC_REV_5719)
2766                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2767         else
2768                 tw32(TG3_CPMU_DRV_STATUS, status);
2769
2770         return status >> TG3_APE_GPIO_MSG_SHIFT;
2771 }
2772
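/* Switch the NIC back to VMAIN power, flagging driver presence to
 * the other functions first on chips that arbitrate the GPIO power
 * switch through the APE lock.
 */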
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2774 {
2775         if (!tg3_flag(tp, IS_NIC))
2776                 return 0;
2777
2778         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780             tg3_asic_rev(tp) == ASIC_REV_5720) {
2781                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2782                         return -EIO;
2783
2784                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2785
2786                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2788
2789                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2790         } else {
2791                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2793         }
2794
2795         return 0;
2796 }
2797
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2799 {
2800         u32 grc_local_ctrl;
2801
2802         if (!tg3_flag(tp, IS_NIC) ||
2803             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804             tg3_asic_rev(tp) == ASIC_REV_5701)
2805                 return;
2806
2807         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2808
2809         tw32_wait_f(GRC_LOCAL_CTRL,
2810                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2812
2813         tw32_wait_f(GRC_LOCAL_CTRL,
2814                     grc_local_ctrl,
2815                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817         tw32_wait_f(GRC_LOCAL_CTRL,
2818                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
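/* Run the GPIO sequence that hands the NIC over to auxiliary
 * (VAUX) power.  The sequence is board-specific: 5700/5701 use a
 * fixed pattern, the non-e 5761 swaps GPIO 0 and 2, the 5714
 * enables GPIO 3 first, and 5753-class parts cannot use GPIO 2.
 */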
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2823 {
2824         if (!tg3_flag(tp, IS_NIC))
2825                 return;
2826
2827         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828             tg3_asic_rev(tp) == ASIC_REV_5701) {
2829                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830                             (GRC_LCLCTRL_GPIO_OE0 |
2831                              GRC_LCLCTRL_GPIO_OE1 |
2832                              GRC_LCLCTRL_GPIO_OE2 |
2833                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2834                              GRC_LCLCTRL_GPIO_OUTPUT1),
2835                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2836         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840                                      GRC_LCLCTRL_GPIO_OE1 |
2841                                      GRC_LCLCTRL_GPIO_OE2 |
2842                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2843                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2844                                      tp->grc_local_ctrl;
2845                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2855         } else {
2856                 u32 no_gpio2;
2857                 u32 grc_local_ctrl = 0;
2858
2859                 /* Workaround to keep the part from drawing too much current. */
2860                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2863                                     grc_local_ctrl,
2864                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2865                 }
2866
2867                 /* On 5753 and variants, GPIO2 cannot be used. */
2868                 no_gpio2 = tp->nic_sram_data_cfg &
2869                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2870
2871                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872                                   GRC_LCLCTRL_GPIO_OE1 |
2873                                   GRC_LCLCTRL_GPIO_OE2 |
2874                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2875                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2876                 if (no_gpio2) {
2877                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2879                 }
2880                 tw32_wait_f(GRC_LOCAL_CTRL,
2881                             tp->grc_local_ctrl | grc_local_ctrl,
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2885
2886                 tw32_wait_f(GRC_LOCAL_CTRL,
2887                             tp->grc_local_ctrl | grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890                 if (!no_gpio2) {
2891                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892                         tw32_wait_f(GRC_LOCAL_CTRL,
2893                                     tp->grc_local_ctrl | grc_local_ctrl,
2894                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2895                 }
2896         }
2897 }
2898
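/* 5717-class VAUX arbitration: publish whether this function needs
 * VAUX, then switch the shared power source only if no other
 * function still has a driver present.
 */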
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2900 {
2901         u32 msg = 0;
2902
2903         /* Serialize power state transitions */
2904         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2905                 return;
2906
2907         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908                 msg = TG3_GPIO_MSG_NEED_VAUX;
2909
2910         msg = tg3_set_function_status(tp, msg);
2911
2912         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2913                 goto done;
2914
2915         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916                 tg3_pwrsrc_switch_to_vaux(tp);
2917         else
2918                 tg3_pwrsrc_die_with_vmain(tp);
2919
2920 done:
2921         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2922 }
2923
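/* Decide whether the NIC (and its peer function on dual-port
 * boards) can run from VMAIN or must stay on VAUX for WoL/ASF, and
 * switch the power source accordingly.
 */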
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2925 {
2926         bool need_vaux = false;
2927
2928         /* The GPIOs do something completely different on 57765. */
2929         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2930                 return;
2931
2932         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934             tg3_asic_rev(tp) == ASIC_REV_5720) {
2935                 tg3_frob_aux_power_5717(tp, include_wol ?
2936                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2937                 return;
2938         }
2939
2940         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941                 struct net_device *dev_peer;
2942
2943                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2944
2945                 /* remove_one() may have been run on the peer. */
2946                 if (dev_peer) {
2947                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2948
2949                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2950                                 return;
2951
2952                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953                             tg3_flag(tp_peer, ENABLE_ASF))
2954                                 need_vaux = true;
2955                 }
2956         }
2957
2958         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959             tg3_flag(tp, ENABLE_ASF))
2960                 need_vaux = true;
2961
2962         if (need_vaux)
2963                 tg3_pwrsrc_switch_to_vaux(tp);
2964         else
2965                 tg3_pwrsrc_die_with_vmain(tp);
2966 }
2967
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2969 {
2970         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2971                 return 1;
2972         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973                 if (speed != SPEED_10)
2974                         return 1;
2975         } else if (speed == SPEED_10)
2976                 return 1;
2977
2978         return 0;
2979 }
2980
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2982 {
2983         switch (tg3_asic_rev(tp)) {
2984         case ASIC_REV_5700:
2985         case ASIC_REV_5704:
2986                 return true;
2987         case ASIC_REV_5780:
2988                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2989                         return true;
2990                 return false;
2991         case ASIC_REV_5717:
2992                 if (!tp->pci_fn)
2993                         return true;
2994                 return false;
2995         case ASIC_REV_5719:
2996         case ASIC_REV_5720:
2997                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2998                     !tp->pci_fn)
2999                         return true;
3000                 return false;
3001         }
3002
3003         return false;
3004 }
3005
3006 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3007 {
3008         u32 val;
3009
3010         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3011                 return;
3012
3013         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3014                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3015                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3016                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3017
3018                         sg_dig_ctrl |=
3019                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3020                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3021                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3022                 }
3023                 return;
3024         }
3025
3026         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3027                 tg3_bmcr_reset(tp);
3028                 val = tr32(GRC_MISC_CFG);
3029                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3030                 udelay(40);
3031                 return;
3032         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3033                 u32 phytest;
3034                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3035                         u32 phy;
3036
3037                         tg3_writephy(tp, MII_ADVERTISE, 0);
3038                         tg3_writephy(tp, MII_BMCR,
3039                                      BMCR_ANENABLE | BMCR_ANRESTART);
3040
3041                         tg3_writephy(tp, MII_TG3_FET_TEST,
3042                                      phytest | MII_TG3_FET_SHADOW_EN);
3043                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3044                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3045                                 tg3_writephy(tp,
3046                                              MII_TG3_FET_SHDW_AUXMODE4,
3047                                              phy);
3048                         }
3049                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3050                 }
3051                 return;
3052         } else if (do_low_power) {
3053                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3054                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3055
3056                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3057                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3058                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3059                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3060         }
3061
3062         /* On some chips the PHY must not be powered down because of
3063          * hardware bugs.
3064          */
3065         if (tg3_phy_power_bug(tp))
3066                 return;
3067
3068         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3069             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3070                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3071                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3072                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3073                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3074         }
3075
3076         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3077 }
3078
3079 /* tp->lock is held. */
3080 static int tg3_nvram_lock(struct tg3 *tp)
3081 {
3082         if (tg3_flag(tp, NVRAM)) {
3083                 int i;
3084
3085                 if (tp->nvram_lock_cnt == 0) {
3086                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3087                         for (i = 0; i < 8000; i++) {
3088                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3089                                         break;
3090                                 udelay(20);
3091                         }
3092                         if (i == 8000) {
3093                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3094                                 return -ENODEV;
3095                         }
3096                 }
3097                 tp->nvram_lock_cnt++;
3098         }
3099         return 0;
3100 }
3101
3102 /* tp->lock is held. */
3103 static void tg3_nvram_unlock(struct tg3 *tp)
3104 {
3105         if (tg3_flag(tp, NVRAM)) {
3106                 if (tp->nvram_lock_cnt > 0)
3107                         tp->nvram_lock_cnt--;
3108                 if (tp->nvram_lock_cnt == 0)
3109                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3110         }
3111 }
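
/* Illustrative sketch, not driver code (the _example helper below is
 * invented for illustration): tp->nvram_lock_cnt makes the SWARB
 * arbitration lock above behave recursively, so nested lock/unlock pairs
 * from a single caller (with tp->lock held) are safe. Assuming the lock
 * is not currently held, a minimal usage pattern looks like this:
 */
static inline int tg3_nvram_lock_example(struct tg3 *tp)
{
	int err = tg3_nvram_lock(tp);	/* cnt 0 -> 1, SWARB_REQ_SET1 issued */
	if (err)
		return err;

	err = tg3_nvram_lock(tp);	/* cnt 1 -> 2, no hardware access */
	if (!err)
		tg3_nvram_unlock(tp);	/* cnt 2 -> 1, lock still held */

	tg3_nvram_unlock(tp);		/* cnt 1 -> 0, SWARB_REQ_CLR1 written */
	return err;
}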
3112
3113 /* tp->lock is held. */
3114 static void tg3_enable_nvram_access(struct tg3 *tp)
3115 {
3116         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3117                 u32 nvaccess = tr32(NVRAM_ACCESS);
3118
3119                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3120         }
3121 }
3122
3123 /* tp->lock is held. */
3124 static void tg3_disable_nvram_access(struct tg3 *tp)
3125 {
3126         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3127                 u32 nvaccess = tr32(NVRAM_ACCESS);
3128
3129                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3130         }
3131 }
3132
3133 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3134                                         u32 offset, u32 *val)
3135 {
3136         u32 tmp;
3137         int i;
3138
3139         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3140                 return -EINVAL;
3141
3142         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3143                                         EEPROM_ADDR_DEVID_MASK |
3144                                         EEPROM_ADDR_READ);
3145         tw32(GRC_EEPROM_ADDR,
3146              tmp |
3147              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3148              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3149               EEPROM_ADDR_ADDR_MASK) |
3150              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3151
3152         for (i = 0; i < 1000; i++) {
3153                 tmp = tr32(GRC_EEPROM_ADDR);
3154
3155                 if (tmp & EEPROM_ADDR_COMPLETE)
3156                         break;
3157                 msleep(1);
3158         }
3159         if (!(tmp & EEPROM_ADDR_COMPLETE))
3160                 return -EBUSY;
3161
3162         tmp = tr32(GRC_EEPROM_DATA);
3163
3164         /*
3165          * The data will always be opposite the native endian
3166          * format.  Perform a blind byteswap to compensate.
3167          */
3168         *val = swab32(tmp);
3169
3170         return 0;
3171 }
3172
3173 #define NVRAM_CMD_TIMEOUT 10000
3174
3175 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3176 {
3177         int i;
3178
3179         tw32(NVRAM_CMD, nvram_cmd);
3180         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3181                 udelay(10);
3182                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3183                         udelay(10);
3184                         break;
3185                 }
3186         }
3187
3188         if (i == NVRAM_CMD_TIMEOUT)
3189                 return -EBUSY;
3190
3191         return 0;
3192 }
3193
3194 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3195 {
3196         if (tg3_flag(tp, NVRAM) &&
3197             tg3_flag(tp, NVRAM_BUFFERED) &&
3198             tg3_flag(tp, FLASH) &&
3199             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3200             (tp->nvram_jedecnum == JEDEC_ATMEL))
3201
3202                 addr = ((addr / tp->nvram_pagesize) <<
3203                         ATMEL_AT45DB0X1B_PAGE_POS) +
3204                        (addr % tp->nvram_pagesize);
3205
3206         return addr;
3207 }
3208
3209 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3210 {
3211         if (tg3_flag(tp, NVRAM) &&
3212             tg3_flag(tp, NVRAM_BUFFERED) &&
3213             tg3_flag(tp, FLASH) &&
3214             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3215             (tp->nvram_jedecnum == JEDEC_ATMEL))
3216
3217                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3218                         tp->nvram_pagesize) +
3219                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3220
3221         return addr;
3222 }
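
/* Worked example, illustrative only (the _example helper is invented for
 * this sketch): the two helpers above are exact inverses for the Atmel
 * AT45DB0x1B parts they target, whose 264-byte pages do not pack
 * linearly; the page index sits at bit ATMEL_AT45DB0X1B_PAGE_POS of the
 * physical address. Assuming that geometry:
 */
static inline bool tg3_nvram_addr_xlat_example(struct tg3 *tp, u32 addr)
{
	u32 phys = tg3_nvram_phys_addr(tp, addr);
	u32 log = tg3_nvram_logical_addr(tp, phys);

	/* e.g. pagesize 264, addr 1000: page 3, offset 208, so
	 * phys == (3 << 9) + 208 == 1744 and log == 1000 again.
	 */
	return log == addr;
}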
3223
3224 /* NOTE: Data read in from NVRAM is byteswapped according to
3225  * the byteswapping settings for all other register accesses.
3226  * tg3 devices are BE devices, so on a BE machine, the data
3227  * returned will be exactly as it is seen in NVRAM.  On a LE
3228  * machine, the 32-bit value will be byteswapped.
3229  */
3230 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3231 {
3232         int ret;
3233
3234         if (!tg3_flag(tp, NVRAM))
3235                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3236
3237         offset = tg3_nvram_phys_addr(tp, offset);
3238
3239         if (offset > NVRAM_ADDR_MSK)
3240                 return -EINVAL;
3241
3242         ret = tg3_nvram_lock(tp);
3243         if (ret)
3244                 return ret;
3245
3246         tg3_enable_nvram_access(tp);
3247
3248         tw32(NVRAM_ADDR, offset);
3249         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3250                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3251
3252         if (ret == 0)
3253                 *val = tr32(NVRAM_RDDATA);
3254
3255         tg3_disable_nvram_access(tp);
3256
3257         tg3_nvram_unlock(tp);
3258
3259         return ret;
3260 }
3261
3262 /* Ensures NVRAM data is in bytestream format. */
3263 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3264 {
3265         u32 v;
3266         int res = tg3_nvram_read(tp, offset, &v);
3267         if (!res)
3268                 *val = cpu_to_be32(v);
3269         return res;
3270 }
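
/* Illustrative sketch, not driver code (helper name invented): per the
 * NOTE above, tg3_nvram_read() returns host-order values, so
 * tg3_nvram_read_be32() yields the same bytestream on any host. A caller
 * wanting the raw NVRAM bytes could therefore do:
 */
static inline int tg3_nvram_read_bytes_example(struct tg3 *tp, u32 offset,
					       u8 bytes[4])
{
	__be32 v;
	int err = tg3_nvram_read_be32(tp, offset, &v);

	if (!err)
		memcpy(bytes, &v, 4);	/* bytes[] matches NVRAM byte order */
	return err;
}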
3271
3272 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3273                                     u32 offset, u32 len, u8 *buf)
3274 {
3275         int i, j, rc = 0;
3276         u32 val;
3277
3278         for (i = 0; i < len; i += 4) {
3279                 u32 addr;
3280                 __be32 data;
3281
3282                 addr = offset + i;
3283
3284                 memcpy(&data, buf + i, 4);
3285
3286                 /*
3287                  * The SEEPROM interface expects the data to always be opposite
3288                  * the native endian format.  We accomplish this by reversing
3289                  * all the operations that would have been performed on the
3290                  * data from a call to tg3_nvram_read_be32().
3291                  */
3292                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3293
3294                 val = tr32(GRC_EEPROM_ADDR);
3295                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3296
3297                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3298                         EEPROM_ADDR_READ);
3299                 tw32(GRC_EEPROM_ADDR, val |
3300                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3301                         (addr & EEPROM_ADDR_ADDR_MASK) |
3302                         EEPROM_ADDR_START |
3303                         EEPROM_ADDR_WRITE);
3304
3305                 for (j = 0; j < 1000; j++) {
3306                         val = tr32(GRC_EEPROM_ADDR);
3307
3308                         if (val & EEPROM_ADDR_COMPLETE)
3309                                 break;
3310                         msleep(1);
3311                 }
3312                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3313                         rc = -EBUSY;
3314                         break;
3315                 }
3316         }
3317
3318         return rc;
3319 }
3320
3321 /* offset and length are dword aligned */
3322 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3323                 u8 *buf)
3324 {
3325         int ret = 0;
3326         u32 pagesize = tp->nvram_pagesize;
3327         u32 pagemask = pagesize - 1;
3328         u32 nvram_cmd;
3329         u8 *tmp;
3330
3331         tmp = kmalloc(pagesize, GFP_KERNEL);
3332         if (tmp == NULL)
3333                 return -ENOMEM;
3334
3335         while (len) {
3336                 int j;
3337                 u32 phy_addr, page_off, size;
3338
3339                 phy_addr = offset & ~pagemask;
3340
3341                 for (j = 0; j < pagesize; j += 4) {
3342                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3343                                                   (__be32 *) (tmp + j));
3344                         if (ret)
3345                                 break;
3346                 }
3347                 if (ret)
3348                         break;
3349
3350                 page_off = offset & pagemask;
3351                 size = pagesize;
3352                 if (len < size)
3353                         size = len;
3354
3355                 len -= size;
3356
3357                 memcpy(tmp + page_off, buf, size);
3358
3359                 offset = offset + (pagesize - page_off);
3360
3361                 tg3_enable_nvram_access(tp);
3362
3363                 /*
3364                  * Before we can erase the flash page, we need
3365                  * to issue a special "write enable" command.
3366                  */
3367                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3368
3369                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3370                         break;
3371
3372                 /* Erase the target page */
3373                 tw32(NVRAM_ADDR, phy_addr);
3374
3375                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3376                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3377
3378                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3379                         break;
3380
3381                 /* Issue another write enable to start the write. */
3382                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383
3384                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3385                         break;
3386
3387                 for (j = 0; j < pagesize; j += 4) {
3388                         __be32 data;
3389
3390                         data = *((__be32 *) (tmp + j));
3391
3392                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3393
3394                         tw32(NVRAM_ADDR, phy_addr + j);
3395
3396                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3397                                 NVRAM_CMD_WR;
3398
3399                         if (j == 0)
3400                                 nvram_cmd |= NVRAM_CMD_FIRST;
3401                         else if (j == (pagesize - 4))
3402                                 nvram_cmd |= NVRAM_CMD_LAST;
3403
3404                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3405                         if (ret)
3406                                 break;
3407                 }
3408                 if (ret)
3409                         break;
3410         }
3411
3412         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3413         tg3_nvram_exec_cmd(tp, nvram_cmd);
3414
3415         kfree(tmp);
3416
3417         return ret;
3418 }
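
/* Illustrative sketch, not driver code (helper name invented): the
 * unbuffered path above is a read-modify-erase-write loop over whole
 * flash pages, and its split of a misaligned write works like this.
 * pagesize must be a power of two for the pagemask to be valid, which
 * holds for the parts that take this path:
 */
static inline void tg3_page_split_example(u32 offset, u32 pagesize,
					  u32 *page_start, u32 *page_off)
{
	u32 pagemask = pagesize - 1;

	*page_start = offset & ~pagemask;	/* first byte of the page */
	*page_off = offset & pagemask;		/* where the new data lands */

	/* e.g. pagesize 256, offset 0x109: page_start 0x100, page_off 9.
	 * The loop reads back the whole page, patches it from page_off
	 * on, erases the page, then rewrites all of it word by word.
	 */
}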
3419
3420 /* offset and length are dword aligned */
3421 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3422                 u8 *buf)
3423 {
3424         int i, ret = 0;
3425
3426         for (i = 0; i < len; i += 4, offset += 4) {
3427                 u32 page_off, phy_addr, nvram_cmd;
3428                 __be32 data;
3429
3430                 memcpy(&data, buf + i, 4);
3431                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3432
3433                 page_off = offset % tp->nvram_pagesize;
3434
3435                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3436
3437                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3438
3439                 if (page_off == 0 || i == 0)
3440                         nvram_cmd |= NVRAM_CMD_FIRST;
3441                 if (page_off == (tp->nvram_pagesize - 4))
3442                         nvram_cmd |= NVRAM_CMD_LAST;
3443
3444                 if (i == (len - 4))
3445                         nvram_cmd |= NVRAM_CMD_LAST;
3446
3447                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3448                     !tg3_flag(tp, FLASH) ||
3449                     !tg3_flag(tp, 57765_PLUS))
3450                         tw32(NVRAM_ADDR, phy_addr);
3451
3452                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3453                     !tg3_flag(tp, 5755_PLUS) &&
3454                     (tp->nvram_jedecnum == JEDEC_ST) &&
3455                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3456                         u32 cmd;
3457
3458                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3459                         ret = tg3_nvram_exec_cmd(tp, cmd);
3460                         if (ret)
3461                                 break;
3462                 }
3463                 if (!tg3_flag(tp, FLASH)) {
3464                         /* We always do complete word writes to eeprom. */
3465                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3466                 }
3467
3468                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3469                 if (ret)
3470                         break;
3471         }
3472         return ret;
3473 }
3474
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3477 {
3478         int ret;
3479
3480         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3481                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3482                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3483                 udelay(40);
3484         }
3485
3486         if (!tg3_flag(tp, NVRAM)) {
3487                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3488         } else {
3489                 u32 grc_mode;
3490
3491                 ret = tg3_nvram_lock(tp);
3492                 if (ret)
3493                         return ret;
3494
3495                 tg3_enable_nvram_access(tp);
3496                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3497                         tw32(NVRAM_WRITE1, 0x406);
3498
3499                 grc_mode = tr32(GRC_MODE);
3500                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3501
3502                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3503                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3504                                 buf);
3505                 } else {
3506                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3507                                 buf);
3508                 }
3509
3510                 grc_mode = tr32(GRC_MODE);
3511                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3512
3513                 tg3_disable_nvram_access(tp);
3514                 tg3_nvram_unlock(tp);
3515         }
3516
3517         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3518                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3519                 udelay(40);
3520         }
3521
3522         return ret;
3523 }
3524
3525 #define RX_CPU_SCRATCH_BASE     0x30000
3526 #define RX_CPU_SCRATCH_SIZE     0x04000
3527 #define TX_CPU_SCRATCH_BASE     0x34000
3528 #define TX_CPU_SCRATCH_SIZE     0x04000
3529
3530 /* tp->lock is held. */
3531 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3532 {
3533         int i;
3534         const int iters = 10000;
3535
3536         for (i = 0; i < iters; i++) {
3537                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3538                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3539                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3540                         break;
3541                 if (pci_channel_offline(tp->pdev))
3542                         return -EBUSY;
3543         }
3544
3545         return (i == iters) ? -EBUSY : 0;
3546 }
3547
3548 /* tp->lock is held. */
3549 static int tg3_rxcpu_pause(struct tg3 *tp)
3550 {
3551         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3552
3553         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3554         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3555         udelay(10);
3556
3557         return rc;
3558 }
3559
3560 /* tp->lock is held. */
3561 static int tg3_txcpu_pause(struct tg3 *tp)
3562 {
3563         return tg3_pause_cpu(tp, TX_CPU_BASE);
3564 }
3565
3566 /* tp->lock is held. */
3567 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3568 {
3569         tw32(cpu_base + CPU_STATE, 0xffffffff);
3570         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3571 }
3572
3573 /* tp->lock is held. */
3574 static void tg3_rxcpu_resume(struct tg3 *tp)
3575 {
3576         tg3_resume_cpu(tp, RX_CPU_BASE);
3577 }
3578
3579 /* tp->lock is held. */
3580 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3581 {
3582         int rc;
3583
3584         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3585
3586         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3587                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3588
3589                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3590                 return 0;
3591         }
3592         if (cpu_base == RX_CPU_BASE) {
3593                 rc = tg3_rxcpu_pause(tp);
3594         } else {
3595                 /*
3596                  * There is only an Rx CPU for the 5750 derivative in the
3597                  * BCM4785.
3598                  */
3599                 if (tg3_flag(tp, IS_SSB_CORE))
3600                         return 0;
3601
3602                 rc = tg3_txcpu_pause(tp);
3603         }
3604
3605         if (rc) {
3606                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3607                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3608                 return -ENODEV;
3609         }
3610
3611         /* Clear firmware's nvram arbitration. */
3612         if (tg3_flag(tp, NVRAM))
3613                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3614         return 0;
3615 }
3616
3617 static int tg3_fw_data_len(struct tg3 *tp,
3618                            const struct tg3_firmware_hdr *fw_hdr)
3619 {
3620         int fw_len;
3621
3622         /* Non-fragmented firmware has one firmware header followed by a
3623          * contiguous chunk of data to be written. The length field in that
3624          * header is not the length of the data to be written but the
3625          * complete length of the bss. The data length is instead derived
3626          * from tp->fw->size minus the header length.
3627          *
3628          * Fragmented firmware has a main header followed by multiple
3629          * fragments. Each fragment is identical to non-fragmented firmware,
3630          * with a firmware header followed by a contiguous chunk of data. In
3631          * the main header, the length field is unused and set to 0xffffffff.
3632          * In each fragment header, the length is the entire size of that
3633          * fragment, i.e. fragment data plus header length. The data length
3634          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3635          */
3636         if (tp->fw_len == 0xffffffff)
3637                 fw_len = be32_to_cpu(fw_hdr->len);
3638         else
3639                 fw_len = tp->fw->size;
3640
3641         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3642 }
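
/* Worked example, illustrative only (the helper and its sizes are
 * hypothetical): for a fragmented image (tp->fw_len == 0xffffffff), a
 * fragment header whose len field reads TG3_FW_HDR_LEN + 512 describes
 * 512 data bytes, i.e. 128 words. For a non-fragmented image with
 * tp->fw->size == TG3_FW_HDR_LEN + 2048, tg3_fw_data_len() reports 512
 * words. The same rule, restated:
 */
static inline int tg3_fw_data_words_example(u32 hdr_len_field, u32 blob_size,
					    bool fragmented)
{
	u32 fw_len = fragmented ? hdr_len_field : blob_size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}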
3643
3644 /* tp->lock is held. */
3645 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3646                                  u32 cpu_scratch_base, int cpu_scratch_size,
3647                                  const struct tg3_firmware_hdr *fw_hdr)
3648 {
3649         int err, i;
3650         void (*write_op)(struct tg3 *, u32, u32);
3651         int total_len = tp->fw->size;
3652
3653         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3654                 netdev_err(tp->dev,
3655                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3656                            __func__);
3657                 return -EINVAL;
3658         }
3659
3660         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3661                 write_op = tg3_write_mem;
3662         else
3663                 write_op = tg3_write_indirect_reg32;
3664
3665         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3666                 /* It is possible that bootcode is still loading at this point.
3667                  * Get the nvram lock before halting the cpu.
3668                  */
3669                 int lock_err = tg3_nvram_lock(tp);
3670                 err = tg3_halt_cpu(tp, cpu_base);
3671                 if (!lock_err)
3672                         tg3_nvram_unlock(tp);
3673                 if (err)
3674                         goto out;
3675
3676                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3677                         write_op(tp, cpu_scratch_base + i, 0);
3678                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3679                 tw32(cpu_base + CPU_MODE,
3680                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3681         } else {
3682                 /* Subtract the additional main header for fragmented
3683                  * firmware and advance to the first fragment.
3684                  */
3685                 total_len -= TG3_FW_HDR_LEN;
3686                 fw_hdr++;
3687         }
3688
3689         do {
3690                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3691                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3692                         write_op(tp, cpu_scratch_base +
3693                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3694                                      (i * sizeof(u32)),
3695                                  be32_to_cpu(fw_data[i]));
3696
3697                 total_len -= be32_to_cpu(fw_hdr->len);
3698
3699                 /* Advance to next fragment */
3700                 fw_hdr = (struct tg3_firmware_hdr *)
3701                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3702         } while (total_len > 0);
3703
3704         err = 0;
3705
3706 out:
3707         return err;
3708 }
3709
3710 /* tp->lock is held. */
3711 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3712 {
3713         int i;
3714         const int iters = 5;
3715
3716         tw32(cpu_base + CPU_STATE, 0xffffffff);
3717         tw32_f(cpu_base + CPU_PC, pc);
3718
3719         for (i = 0; i < iters; i++) {
3720                 if (tr32(cpu_base + CPU_PC) == pc)
3721                         break;
3722                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3723                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3724                 tw32_f(cpu_base + CPU_PC, pc);
3725                 udelay(1000);
3726         }
3727
3728         return (i == iters) ? -EBUSY : 0;
3729 }
3730
3731 /* tp->lock is held. */
3732 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3733 {
3734         const struct tg3_firmware_hdr *fw_hdr;
3735         int err;
3736
3737         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3738
3739         /* The firmware blob starts with version numbers, followed by
3740          * the start address and length. The length field holds the
3741          * complete length: end_address_of_bss - start_address_of_text.
3742          * The remainder is the blob to be loaded contiguously from the
3743          * start address. */
3744
3745         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3746                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3747                                     fw_hdr);
3748         if (err)
3749                 return err;
3750
3751         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3752                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3753                                     fw_hdr);
3754         if (err)
3755                 return err;
3756
3757         /* Now startup only the RX cpu. */
3758         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3759                                        be32_to_cpu(fw_hdr->base_addr));
3760         if (err) {
3761                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3762                            "should be %08x\n", __func__,
3763                            tr32(RX_CPU_BASE + CPU_PC),
3764                                 be32_to_cpu(fw_hdr->base_addr));
3765                 return -ENODEV;
3766         }
3767
3768         tg3_rxcpu_resume(tp);
3769
3770         return 0;
3771 }
3772
3773 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3774 {
3775         const int iters = 1000;
3776         int i;
3777         u32 val;
3778
3779         /* Wait for the boot code to complete initialization and enter the
3780          * service loop. It is then safe to download service patches.
3781          */
3782         for (i = 0; i < iters; i++) {
3783                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3784                         break;
3785
3786                 udelay(10);
3787         }
3788
3789         if (i == iters) {
3790                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3791                 return -EBUSY;
3792         }
3793
3794         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3795         if (val & 0xff) {
3796                 netdev_warn(tp->dev,
3797                             "Other patches exist. Not downloading EEE patch\n");
3798                 return -EEXIST;
3799         }
3800
3801         return 0;
3802 }
3803
3804 /* tp->lock is held. */
3805 static void tg3_load_57766_firmware(struct tg3 *tp)
3806 {
3807         struct tg3_firmware_hdr *fw_hdr;
3808
3809         if (!tg3_flag(tp, NO_NVRAM))
3810                 return;
3811
3812         if (tg3_validate_rxcpu_state(tp))
3813                 return;
3814
3815         if (!tp->fw)
3816                 return;
3817
3818         /* This firmware blob has a different format than older firmware
3819          * releases, as described below. The main difference is that the
3820          * data is fragmented and written to non-contiguous locations.
3821          *
3822          * The blob begins with a firmware header identical to other
3823          * firmware, consisting of version, base addr and length. The length
3824          * here is unused and set to 0xffffffff.
3825          *
3826          * This is followed by a series of firmware fragments, each
3827          * individually identical to older firmware, i.e. a firmware header
3828          * followed by the data for that fragment. The version field of the
3829          * individual fragment headers is unused.
3830          */
3831
3832         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3833         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3834                 return;
3835
3836         if (tg3_rxcpu_pause(tp))
3837                 return;
3838
3839         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3840         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3841
3842         tg3_rxcpu_resume(tp);
3843 }
3844
3845 /* tp->lock is held. */
3846 static int tg3_load_tso_firmware(struct tg3 *tp)
3847 {
3848         const struct tg3_firmware_hdr *fw_hdr;
3849         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3850         int err;
3851
3852         if (!tg3_flag(tp, FW_TSO))
3853                 return 0;
3854
3855         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3856
3857         /* The firmware blob starts with version numbers, followed by
3858          * the start address and length. The length field holds the
3859          * complete length: end_address_of_bss - start_address_of_text.
3860          * The remainder is the blob to be loaded contiguously from the
3861          * start address. */
3862
3863         cpu_scratch_size = tp->fw_len;
3864
3865         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3866                 cpu_base = RX_CPU_BASE;
3867                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3868         } else {
3869                 cpu_base = TX_CPU_BASE;
3870                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3871                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3872         }
3873
3874         err = tg3_load_firmware_cpu(tp, cpu_base,
3875                                     cpu_scratch_base, cpu_scratch_size,
3876                                     fw_hdr);
3877         if (err)
3878                 return err;
3879
3880         /* Now startup the cpu. */
3881         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3882                                        be32_to_cpu(fw_hdr->base_addr));
3883         if (err) {
3884                 netdev_err(tp->dev,
3885                            "%s fails to set CPU PC, is %08x should be %08x\n",
3886                            __func__, tr32(cpu_base + CPU_PC),
3887                            be32_to_cpu(fw_hdr->base_addr));
3888                 return -ENODEV;
3889         }
3890
3891         tg3_resume_cpu(tp, cpu_base);
3892         return 0;
3893 }
3894
3895
3896 /* tp->lock is held. */
3897 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3898 {
3899         u32 addr_high, addr_low;
3900         int i;
3901
3902         addr_high = ((tp->dev->dev_addr[0] << 8) |
3903                      tp->dev->dev_addr[1]);
3904         addr_low = ((tp->dev->dev_addr[2] << 24) |
3905                     (tp->dev->dev_addr[3] << 16) |
3906                     (tp->dev->dev_addr[4] <<  8) |
3907                     (tp->dev->dev_addr[5] <<  0));
3908         for (i = 0; i < 4; i++) {
3909                 if (i == 1 && skip_mac_1)
3910                         continue;
3911                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3912                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3913         }
3914
3915         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3916             tg3_asic_rev(tp) == ASIC_REV_5704) {
3917                 for (i = 0; i < 12; i++) {
3918                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3919                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3920                 }
3921         }
3922
3923         addr_high = (tp->dev->dev_addr[0] +
3924                      tp->dev->dev_addr[1] +
3925                      tp->dev->dev_addr[2] +
3926                      tp->dev->dev_addr[3] +
3927                      tp->dev->dev_addr[4] +
3928                      tp->dev->dev_addr[5]) &
3929                 TX_BACKOFF_SEED_MASK;
3930         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3931 }
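
/* Worked example, illustrative only (helper name invented): the packing
 * above places the first two MAC octets in the high register and the
 * last four in the low one.
 */
static inline void tg3_mac_addr_pack_example(const u8 mac[6],
					     u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((u32)mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];

	/* e.g. 00:10:18:aa:bb:cc -> *hi == 0x0010, *lo == 0x18aabbcc */
}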
3932
3933 static void tg3_enable_register_access(struct tg3 *tp)
3934 {
3935         /*
3936          * Make sure register accesses (indirect or otherwise) will function
3937          * correctly.
3938          */
3939         pci_write_config_dword(tp->pdev,
3940                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3941 }
3942
3943 static int tg3_power_up(struct tg3 *tp)
3944 {
3945         int err;
3946
3947         tg3_enable_register_access(tp);
3948
3949         err = pci_set_power_state(tp->pdev, PCI_D0);
3950         if (!err) {
3951                 /* Switch out of Vaux if it is a NIC */
3952                 tg3_pwrsrc_switch_to_vmain(tp);
3953         } else {
3954                 netdev_err(tp->dev, "Transition to D0 failed\n");
3955         }
3956
3957         return err;
3958 }
3959
3960 static int tg3_setup_phy(struct tg3 *, bool);
3961
3962 static int tg3_power_down_prepare(struct tg3 *tp)
3963 {
3964         u32 misc_host_ctrl;
3965         bool device_should_wake, do_low_power;
3966
3967         tg3_enable_register_access(tp);
3968
3969         /* Restore the CLKREQ setting. */
3970         if (tg3_flag(tp, CLKREQ_BUG))
3971                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3972                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3973
3974         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3975         tw32(TG3PCI_MISC_HOST_CTRL,
3976              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3977
3978         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3979                              tg3_flag(tp, WOL_ENABLE);
3980
3981         if (tg3_flag(tp, USE_PHYLIB)) {
3982                 do_low_power = false;
3983                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3984                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3985                         struct phy_device *phydev;
3986                         u32 phyid, advertising;
3987
3988                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3989
3990                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3991
3992                         tp->link_config.speed = phydev->speed;
3993                         tp->link_config.duplex = phydev->duplex;
3994                         tp->link_config.autoneg = phydev->autoneg;
3995                         tp->link_config.advertising = phydev->advertising;
3996
3997                         advertising = ADVERTISED_TP |
3998                                       ADVERTISED_Pause |
3999                                       ADVERTISED_Autoneg |
4000                                       ADVERTISED_10baseT_Half;
4001
4002                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4003                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4004                                         advertising |=
4005                                                 ADVERTISED_100baseT_Half |
4006                                                 ADVERTISED_100baseT_Full |
4007                                                 ADVERTISED_10baseT_Full;
4008                                 else
4009                                         advertising |= ADVERTISED_10baseT_Full;
4010                         }
4011
4012                         phydev->advertising = advertising;
4013
4014                         phy_start_aneg(phydev);
4015
4016                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4017                         if (phyid != PHY_ID_BCMAC131) {
4018                                 phyid &= PHY_BCM_OUI_MASK;
4019                                 if (phyid == PHY_BCM_OUI_1 ||
4020                                     phyid == PHY_BCM_OUI_2 ||
4021                                     phyid == PHY_BCM_OUI_3)
4022                                         do_low_power = true;
4023                         }
4024                 }
4025         } else {
4026                 do_low_power = true;
4027
4028                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4029                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4030
4031                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4032                         tg3_setup_phy(tp, false);
4033         }
4034
4035         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4036                 u32 val;
4037
4038                 val = tr32(GRC_VCPU_EXT_CTRL);
4039                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4040         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4041                 int i;
4042                 u32 val;
4043
4044                 for (i = 0; i < 200; i++) {
4045                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4046                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4047                                 break;
4048                         msleep(1);
4049                 }
4050         }
4051         if (tg3_flag(tp, WOL_CAP))
4052                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4053                                                      WOL_DRV_STATE_SHUTDOWN |
4054                                                      WOL_DRV_WOL |
4055                                                      WOL_SET_MAGIC_PKT);
4056
4057         if (device_should_wake) {
4058                 u32 mac_mode;
4059
4060                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4061                         if (do_low_power &&
4062                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4063                                 tg3_phy_auxctl_write(tp,
4064                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4065                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4066                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4067                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4068                                 udelay(40);
4069                         }
4070
4071                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4072                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4073                         else if (tp->phy_flags &
4074                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4075                                 if (tp->link_config.active_speed == SPEED_1000)
4076                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4077                                 else
4078                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4079                         } else
4080                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4081
4082                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4083                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4084                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4085                                              SPEED_100 : SPEED_10;
4086                                 if (tg3_5700_link_polarity(tp, speed))
4087                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4088                                 else
4089                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4090                         }
4091                 } else {
4092                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4093                 }
4094
4095                 if (!tg3_flag(tp, 5750_PLUS))
4096                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4097
4098                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4099                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4100                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4101                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4102
4103                 if (tg3_flag(tp, ENABLE_APE))
4104                         mac_mode |= MAC_MODE_APE_TX_EN |
4105                                     MAC_MODE_APE_RX_EN |
4106                                     MAC_MODE_TDE_ENABLE;
4107
4108                 tw32_f(MAC_MODE, mac_mode);
4109                 udelay(100);
4110
4111                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4112                 udelay(10);
4113         }
4114
4115         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4116             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4117              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4118                 u32 base_val;
4119
4120                 base_val = tp->pci_clock_ctrl;
4121                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4122                              CLOCK_CTRL_TXCLK_DISABLE);
4123
4124                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4125                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4126         } else if (tg3_flag(tp, 5780_CLASS) ||
4127                    tg3_flag(tp, CPMU_PRESENT) ||
4128                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4129                 /* do nothing */
4130         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4131                 u32 newbits1, newbits2;
4132
4133                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4134                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4135                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4136                                     CLOCK_CTRL_TXCLK_DISABLE |
4137                                     CLOCK_CTRL_ALTCLK);
4138                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4139                 } else if (tg3_flag(tp, 5705_PLUS)) {
4140                         newbits1 = CLOCK_CTRL_625_CORE;
4141                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4142                 } else {
4143                         newbits1 = CLOCK_CTRL_ALTCLK;
4144                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4145                 }
4146
4147                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4148                             40);
4149
4150                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4151                             40);
4152
4153                 if (!tg3_flag(tp, 5705_PLUS)) {
4154                         u32 newbits3;
4155
4156                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4157                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4158                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4159                                             CLOCK_CTRL_TXCLK_DISABLE |
4160                                             CLOCK_CTRL_44MHZ_CORE);
4161                         } else {
4162                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4163                         }
4164
4165                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4166                                     tp->pci_clock_ctrl | newbits3, 40);
4167                 }
4168         }
4169
4170         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4171                 tg3_power_down_phy(tp, do_low_power);
4172
4173         tg3_frob_aux_power(tp, true);
4174
4175         /* Workaround for unstable PLL clock */
4176         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4177             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4178              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4179                 u32 val = tr32(0x7d00);
4180
4181                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4182                 tw32(0x7d00, val);
4183                 if (!tg3_flag(tp, ENABLE_ASF)) {
4184                         int err;
4185
4186                         err = tg3_nvram_lock(tp);
4187                         tg3_halt_cpu(tp, RX_CPU_BASE);
4188                         if (!err)
4189                                 tg3_nvram_unlock(tp);
4190                 }
4191         }
4192
4193         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4194
4195         return 0;
4196 }
4197
4198 static void tg3_power_down(struct tg3 *tp)
4199 {
4200         tg3_power_down_prepare(tp);
4201
4202         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4203         pci_set_power_state(tp->pdev, PCI_D3hot);
4204 }
4205
4206 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4207 {
4208         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4209         case MII_TG3_AUX_STAT_10HALF:
4210                 *speed = SPEED_10;
4211                 *duplex = DUPLEX_HALF;
4212                 break;
4213
4214         case MII_TG3_AUX_STAT_10FULL:
4215                 *speed = SPEED_10;
4216                 *duplex = DUPLEX_FULL;
4217                 break;
4218
4219         case MII_TG3_AUX_STAT_100HALF:
4220                 *speed = SPEED_100;
4221                 *duplex = DUPLEX_HALF;
4222                 break;
4223
4224         case MII_TG3_AUX_STAT_100FULL:
4225                 *speed = SPEED_100;
4226                 *duplex = DUPLEX_FULL;
4227                 break;
4228
4229         case MII_TG3_AUX_STAT_1000HALF:
4230                 *speed = SPEED_1000;
4231                 *duplex = DUPLEX_HALF;
4232                 break;
4233
4234         case MII_TG3_AUX_STAT_1000FULL:
4235                 *speed = SPEED_1000;
4236                 *duplex = DUPLEX_FULL;
4237                 break;
4238
4239         default:
4240                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4241                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4242                                  SPEED_10;
4243                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4244                                   DUPLEX_HALF;
4245                         break;
4246                 }
4247                 *speed = SPEED_UNKNOWN;
4248                 *duplex = DUPLEX_UNKNOWN;
4249                 break;
4250         }
4251 }
4252
4253 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4254 {
4255         int err = 0;
4256         u32 val, new_adv;
4257
4258         new_adv = ADVERTISE_CSMA;
4259         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4260         new_adv |= mii_advertise_flowctrl(flowctrl);
4261
4262         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4263         if (err)
4264                 goto done;
4265
4266         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4267                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4268
4269                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4270                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4271                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4272
4273                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4274                 if (err)
4275                         goto done;
4276         }
4277
4278         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4279                 goto done;
4280
4281         tw32(TG3_CPMU_EEE_MODE,
4282              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4283
4284         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4285         if (!err) {
4286                 u32 err2;
4287
4288                 val = 0;
4289                 /* Advertise 100BASE-TX EEE ability */
4290                 if (advertise & ADVERTISED_100baseT_Full)
4291                         val |= MDIO_AN_EEE_ADV_100TX;
4292                 /* Advertise 1000BASE-T EEE ability */
4293                 if (advertise & ADVERTISED_1000baseT_Full)
4294                         val |= MDIO_AN_EEE_ADV_1000T;
4295                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4296                 if (err)
4297                         val = 0;
4298
4299                 switch (tg3_asic_rev(tp)) {
4300                 case ASIC_REV_5717:
4301                 case ASIC_REV_57765:
4302                 case ASIC_REV_57766:
4303                 case ASIC_REV_5719:
4304                         /* If we advertised any EEE modes above... */
4305                         if (val)
4306                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4307                                       MII_TG3_DSP_TAP26_RMRXSTO |
4308                                       MII_TG3_DSP_TAP26_OPCSINPT;
4309                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4310                         /* Fall through */
4311                 case ASIC_REV_5720:
4312                 case ASIC_REV_5762:
4313                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4314                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4315                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4316                 }
4317
4318                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4319                 if (!err)
4320                         err = err2;
4321         }
4322
4323 done:
4324         return err;
4325 }
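
/* Illustrative sketch, not driver code (helper name invented): the
 * ethtool -> MII translation used in tg3_phy_autoneg_cfg() maps
 * link-mode bits onto MII_ADVERTISE register bits, e.g.:
 */
static inline u32 tg3_adv_to_mii_example(void)
{
	u32 ethadv = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full;

	/* ethtool_adv_to_mii_adv_t() turns the modes above into
	 * ADVERTISE_10FULL | ADVERTISE_100FULL; masking with
	 * ADVERTISE_ALL keeps only the 10/100 technology bits.
	 */
	return ethtool_adv_to_mii_adv_t(ethadv) & ADVERTISE_ALL;
}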
4326
4327 static void tg3_phy_copper_begin(struct tg3 *tp)
4328 {
4329         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4330             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4331                 u32 adv, fc;
4332
4333                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4334                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4335                         adv = ADVERTISED_10baseT_Half |
4336                               ADVERTISED_10baseT_Full;
4337                         if (tg3_flag(tp, WOL_SPEED_100MB))
4338                                 adv |= ADVERTISED_100baseT_Half |
4339                                        ADVERTISED_100baseT_Full;
4340                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4341                                 adv |= ADVERTISED_1000baseT_Half |
4342                                        ADVERTISED_1000baseT_Full;
4343
4344                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4345                 } else {
4346                         adv = tp->link_config.advertising;
4347                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4348                                 adv &= ~(ADVERTISED_1000baseT_Half |
4349                                          ADVERTISED_1000baseT_Full);
4350
4351                         fc = tp->link_config.flowctrl;
4352                 }
4353
4354                 tg3_phy_autoneg_cfg(tp, adv, fc);
4355
4356                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4357                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4358                         /* Normally during power down we want to autonegotiate
4359                          * the lowest possible speed for WOL. However, to avoid
4360                          * link flap, we leave it untouched.
4361                          */
4362                         return;
4363                 }
4364
4365                 tg3_writephy(tp, MII_BMCR,
4366                              BMCR_ANENABLE | BMCR_ANRESTART);
4367         } else {
4368                 int i;
4369                 u32 bmcr, orig_bmcr;
4370
4371                 tp->link_config.active_speed = tp->link_config.speed;
4372                 tp->link_config.active_duplex = tp->link_config.duplex;
4373
4374                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4375                         /* With autoneg disabled, the 5714/5715 only links up
4376                          * when the advertisement register has the configured
4377                          * speed enabled.
4378                          */
4379                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4380                 }
4381
4382                 bmcr = 0;
4383                 switch (tp->link_config.speed) {
4384                 default:
4385                 case SPEED_10:
4386                         break;
4387
4388                 case SPEED_100:
4389                         bmcr |= BMCR_SPEED100;
4390                         break;
4391
4392                 case SPEED_1000:
4393                         bmcr |= BMCR_SPEED1000;
4394                         break;
4395                 }
4396
4397                 if (tp->link_config.duplex == DUPLEX_FULL)
4398                         bmcr |= BMCR_FULLDPLX;
4399
4400                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4401                     (bmcr != orig_bmcr)) {
4402                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4403                         for (i = 0; i < 1500; i++) {
4404                                 u32 tmp;
4405
4406                                 udelay(10);
4407                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4408                                     tg3_readphy(tp, MII_BMSR, &tmp))
4409                                         continue;
4410                                 if (!(tmp & BMSR_LSTATUS)) {
4411                                         udelay(40);
4412                                         break;
4413                                 }
4414                         }
4415                         tg3_writephy(tp, MII_BMCR, bmcr);
4416                         udelay(40);
4417                 }
4418         }
4419 }
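/* A minimal sketch of the latched-BMSR pattern used above, assuming only
 * accessors already defined in this file.  The 802.3 BMSR link bit is
 * latched-low, so the first read flushes any stale link-down indication
 * and only the second read is trusted:
 *
 *	u32 bmsr;
 *
 *	tg3_readphy(tp, MII_BMSR, &bmsr);
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		link_really_up = true;
 *
 * (link_really_up is a placeholder, not a field of struct tg3.)  The
 * BMCR_LOOPBACK write in the forced-speed path above exists only to take
 * the link down before the new speed/duplex is programmed.
 */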
4420
4421 static int tg3_phy_pull_config(struct tg3 *tp)
4422 {
4423         int err;
4424         u32 val;
4425
4426         err = tg3_readphy(tp, MII_BMCR, &val);
4427         if (err)
4428                 goto done;
4429
4430         if (!(val & BMCR_ANENABLE)) {
4431                 tp->link_config.autoneg = AUTONEG_DISABLE;
4432                 tp->link_config.advertising = 0;
4433                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4434
4435                 err = -EIO;
4436
4437                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4438                 case 0:
4439                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4440                                 goto done;
4441
4442                         tp->link_config.speed = SPEED_10;
4443                         break;
4444                 case BMCR_SPEED100:
4445                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4446                                 goto done;
4447
4448                         tp->link_config.speed = SPEED_100;
4449                         break;
4450                 case BMCR_SPEED1000:
4451                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4452                                 tp->link_config.speed = SPEED_1000;
4453                                 break;
4454                         }
4455                         /* Fall through */
4456                 default:
4457                         goto done;
4458                 }
4459
4460                 if (val & BMCR_FULLDPLX)
4461                         tp->link_config.duplex = DUPLEX_FULL;
4462                 else
4463                         tp->link_config.duplex = DUPLEX_HALF;
4464
4465                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4466
4467                 err = 0;
4468                 goto done;
4469         }
4470
4471         tp->link_config.autoneg = AUTONEG_ENABLE;
4472         tp->link_config.advertising = ADVERTISED_Autoneg;
4473         tg3_flag_set(tp, PAUSE_AUTONEG);
4474
4475         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4476                 u32 adv;
4477
4478                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4479                 if (err)
4480                         goto done;
4481
4482                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4483                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4484
4485                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4486         } else {
4487                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4488         }
4489
4490         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4491                 u32 adv;
4492
4493                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4494                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4495                         if (err)
4496                                 goto done;
4497
4498                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4499                 } else {
4500                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4501                         if (err)
4502                                 goto done;
4503
4504                         adv = tg3_decode_flowctrl_1000X(val);
4505                         tp->link_config.flowctrl = adv;
4506
4507                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4508                         adv = mii_adv_to_ethtool_adv_x(val);
4509                 }
4510
4511                 tp->link_config.advertising |= adv;
4512         }
4513
4514 done:
4515         return err;
4516 }
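/* The BMCR speed decode in tg3_phy_pull_config() follows the standard
 * MII encoding from <linux/mii.h>.  A short summary of the switch above:
 *
 *	neither speed bit set	-> SPEED_10   (rejected on serdes PHYs)
 *	BMCR_SPEED100		-> SPEED_100  (rejected on serdes PHYs)
 *	BMCR_SPEED1000		-> SPEED_1000 (rejected on 10/100-only PHYs)
 *	both bits set		-> reserved; config rejected with -EIO
 */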
4517
4518 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4519 {
4520         int err;
4521
4522         /* Turn off tap power management and set the
4523          * extended packet length bit. */
4524         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4525
4526         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4527         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4528         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4529         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4530         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4531
4532         udelay(40);
4533
4534         return err;
4535 }
4536
4537 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4538 {
4539         u32 val;
4540         u32 tgtadv = 0;
4541         u32 advertising = tp->link_config.advertising;
4542
4543         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4544                 return true;
4545
4546         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4547                 return false;
4548
4549         val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4550
4551
4552         if (advertising & ADVERTISED_100baseT_Full)
4553                 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4554         if (advertising & ADVERTISED_1000baseT_Full)
4555                 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4556
4557         if (val != tgtadv)
4558                 return false;
4559
4560         return true;
4561 }
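/* EEE abilities live in the clause-45 AN MMD rather than in the
 * clause-22 register set, hence the indirect tg3_phy_cl45_read() above.
 * The check reduces to a bit-for-bit comparison:
 *
 *	advertising ADVERTISED_100baseT_Full  <-> MDIO_AN_EEE_ADV_100TX
 *	advertising ADVERTISED_1000baseT_Full <-> MDIO_AN_EEE_ADV_1000T
 *
 * A mismatch means the PHY's EEE advertisement is stale; the caller in
 * tg3_setup_copper_phy() then arranges a PHY reset to reprogram it.
 */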
4562
4563 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4564 {
4565         u32 advmsk, tgtadv, advertising;
4566
4567         advertising = tp->link_config.advertising;
4568         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4569
4570         advmsk = ADVERTISE_ALL;
4571         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4572                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4573                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4574         }
4575
4576         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4577                 return false;
4578
4579         if ((*lcladv & advmsk) != tgtadv)
4580                 return false;
4581
4582         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4583                 u32 tg3_ctrl;
4584
4585                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4586
4587                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4588                         return false;
4589
4590                 if (tgtadv &&
4591                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4592                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4593                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4594                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4595                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4596                 } else {
4597                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4598                 }
4599
4600                 if (tg3_ctrl != tgtadv)
4601                         return false;
4602         }
4603
4604         return true;
4605 }
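/* Note on the 5701 A0/B0 branch above: those revisions force the local
 * PHY to act as 1000BASE-T master, so CTL1000_AS_MASTER and
 * CTL1000_ENABLE_MASTER are folded into both the expected value and the
 * comparison mask.  Without widening the mask this way, the "config ok"
 * test would report a spurious mismatch on every poll of those chips.
 */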
4606
4607 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4608 {
4609         u32 lpeth = 0;
4610
4611         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4612                 u32 val;
4613
4614                 if (tg3_readphy(tp, MII_STAT1000, &val))
4615                         return false;
4616
4617                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4618         }
4619
4620         if (tg3_readphy(tp, MII_LPA, rmtadv))
4621                 return false;
4622
4623         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4624         tp->link_config.rmt_adv = lpeth;
4625
4626         return true;
4627 }
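/* A condensed sketch of the decode above, assuming the generic helpers
 * from <linux/mii.h>: the partner's 1000BASE-T abilities come from
 * MII_STAT1000 and the 10/100 + pause abilities from MII_LPA, all
 * folded into one ethtool bitmask:
 *
 *	u32 rmt = 0, val;
 *
 *	if (!tg3_readphy(tp, MII_STAT1000, &val))
 *		rmt |= mii_stat1000_to_ethtool_lpa_t(val);
 *	if (!tg3_readphy(tp, MII_LPA, &val))
 *		rmt |= mii_lpa_to_ethtool_lpa_t(val);
 */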
4628
4629 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4630 {
4631         if (curr_link_up != tp->link_up) {
4632                 if (curr_link_up) {
4633                         netif_carrier_on(tp->dev);
4634                 } else {
4635                         netif_carrier_off(tp->dev);
4636                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4637                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4638                 }
4639
4640                 tg3_link_report(tp);
4641                 return true;
4642         }
4643
4644         return false;
4645 }
4646
4647 static void tg3_clear_mac_status(struct tg3 *tp)
4648 {
4649         tw32(MAC_EVENT, 0);
4650
4651         tw32_f(MAC_STATUS,
4652                MAC_STATUS_SYNC_CHANGED |
4653                MAC_STATUS_CFG_CHANGED |
4654                MAC_STATUS_MI_COMPLETION |
4655                MAC_STATUS_LNKSTATE_CHANGED);
4656         udelay(40);
4657 }
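/* tw32_f() is the driver's write-then-flush register accessor; the
 * read-back forces the posted PCI write to complete before udelay()
 * starts counting.  The write/flush/40us-wait pattern above recurs in
 * all of the link-setup paths below whenever latched status bits must
 * be cleared before a fresh link event can be detected.
 */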
4658
4659 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4660 {
4661         bool current_link_up;
4662         u32 bmsr, val;
4663         u32 lcl_adv, rmt_adv;
4664         u16 current_speed;
4665         u8 current_duplex;
4666         int i, err;
4667
4668         tg3_clear_mac_status(tp);
4669
4670         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4671                 tw32_f(MAC_MI_MODE,
4672                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4673                 udelay(80);
4674         }
4675
4676         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4677
4678         /* Some third-party PHYs need to be reset when the link
4679          * goes down.
4680          */
4681         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4682              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4683              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4684             tp->link_up) {
4685                 tg3_readphy(tp, MII_BMSR, &bmsr);
4686                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4687                     !(bmsr & BMSR_LSTATUS))
4688                         force_reset = true;
4689         }
4690         if (force_reset)
4691                 tg3_phy_reset(tp);
4692
4693         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4694                 tg3_readphy(tp, MII_BMSR, &bmsr);
4695                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4696                     !tg3_flag(tp, INIT_COMPLETE))
4697                         bmsr = 0;
4698
4699                 if (!(bmsr & BMSR_LSTATUS)) {
4700                         err = tg3_init_5401phy_dsp(tp);
4701                         if (err)
4702                                 return err;
4703
4704                         tg3_readphy(tp, MII_BMSR, &bmsr);
4705                         for (i = 0; i < 1000; i++) {
4706                                 udelay(10);
4707                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4708                                     (bmsr & BMSR_LSTATUS)) {
4709                                         udelay(40);
4710                                         break;
4711                                 }
4712                         }
4713
4714                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4715                             TG3_PHY_REV_BCM5401_B0 &&
4716                             !(bmsr & BMSR_LSTATUS) &&
4717                             tp->link_config.active_speed == SPEED_1000) {
4718                                 err = tg3_phy_reset(tp);
4719                                 if (!err)
4720                                         err = tg3_init_5401phy_dsp(tp);
4721                                 if (err)
4722                                         return err;
4723                         }
4724                 }
4725         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4726                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4727                 /* 5701 {A0,B0} CRC bug workaround */
4728                 tg3_writephy(tp, 0x15, 0x0a75);
4729                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4730                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4731                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4732         }
4733
4734         /* Clear pending interrupts... */
4735         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4736         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4737
4738         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4739                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4740         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4741                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4742
4743         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4744             tg3_asic_rev(tp) == ASIC_REV_5701) {
4745                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4746                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4747                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4748                 else
4749                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4750         }
4751
4752         current_link_up = false;
4753         current_speed = SPEED_UNKNOWN;
4754         current_duplex = DUPLEX_UNKNOWN;
4755         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4756         tp->link_config.rmt_adv = 0;
4757
4758         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4759                 err = tg3_phy_auxctl_read(tp,
4760                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4761                                           &val);
4762                 if (!err && !(val & (1 << 10))) {
4763                         tg3_phy_auxctl_write(tp,
4764                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4765                                              val | (1 << 10));
4766                         goto relink;
4767                 }
4768         }
4769
4770         bmsr = 0;
4771         for (i = 0; i < 100; i++) {
4772                 tg3_readphy(tp, MII_BMSR, &bmsr);
4773                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4774                     (bmsr & BMSR_LSTATUS))
4775                         break;
4776                 udelay(40);
4777         }
4778
4779         if (bmsr & BMSR_LSTATUS) {
4780                 u32 aux_stat, bmcr;
4781
4782                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4783                 for (i = 0; i < 2000; i++) {
4784                         udelay(10);
4785                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4786                             aux_stat)
4787                                 break;
4788                 }
4789
4790                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4791                                              &current_speed,
4792                                              &current_duplex);
4793
4794                 bmcr = 0;
4795                 for (i = 0; i < 200; i++) {
4796                         tg3_readphy(tp, MII_BMCR, &bmcr);
4797                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4798                                 continue;
4799                         if (bmcr && bmcr != 0x7fff)
4800                                 break;
4801                         udelay(10);
4802                 }
4803
4804                 lcl_adv = 0;
4805                 rmt_adv = 0;
4806
4807                 tp->link_config.active_speed = current_speed;
4808                 tp->link_config.active_duplex = current_duplex;
4809
4810                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4811                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4812
4813                         if ((bmcr & BMCR_ANENABLE) &&
4814                             eee_config_ok &&
4815                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4816                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4817                                 current_link_up = true;
4818
4819                         /* Changes to the EEE settings take effect only after
4820                          * a PHY reset.  If we skipped a reset because Link
4821                          * Flap Avoidance is enabled, do it now.
4822                          */
4823                         if (!eee_config_ok &&
4824                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4825                             !force_reset)
4826                                 tg3_phy_reset(tp);
4827                 } else {
4828                         if (!(bmcr & BMCR_ANENABLE) &&
4829                             tp->link_config.speed == current_speed &&
4830                             tp->link_config.duplex == current_duplex) {
4831                                 current_link_up = true;
4832                         }
4833                 }
4834
4835                 if (current_link_up &&
4836                     tp->link_config.active_duplex == DUPLEX_FULL) {
4837                         u32 reg, bit;
4838
4839                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4840                                 reg = MII_TG3_FET_GEN_STAT;
4841                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4842                         } else {
4843                                 reg = MII_TG3_EXT_STAT;
4844                                 bit = MII_TG3_EXT_STAT_MDIX;
4845                         }
4846
4847                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4848                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4849
4850                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4851                 }
4852         }
4853
4854 relink:
4855         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4856                 tg3_phy_copper_begin(tp);
4857
4858                 if (tg3_flag(tp, ROBOSWITCH)) {
4859                         current_link_up = true;
4860                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4861                         current_speed = SPEED_1000;
4862                         current_duplex = DUPLEX_FULL;
4863                         tp->link_config.active_speed = current_speed;
4864                         tp->link_config.active_duplex = current_duplex;
4865                 }
4866
4867                 tg3_readphy(tp, MII_BMSR, &bmsr);
4868                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4869                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4870                         current_link_up = true;
4871         }
4872
4873         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4874         if (current_link_up) {
4875                 if (tp->link_config.active_speed == SPEED_100 ||
4876                     tp->link_config.active_speed == SPEED_10)
4877                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4878                 else
4879                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4880         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4881                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4882         else
4883                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4884
4885         /* For the 5750 core in the BCM4785 chip to work properly
4886          * in RGMII mode, the LED Control Register must be set up.
4887          */
4888         if (tg3_flag(tp, RGMII_MODE)) {
4889                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4890                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4891
4892                 if (tp->link_config.active_speed == SPEED_10)
4893                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4894                 else if (tp->link_config.active_speed == SPEED_100)
4895                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4896                                      LED_CTRL_100MBPS_ON);
4897                 else if (tp->link_config.active_speed == SPEED_1000)
4898                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4899                                      LED_CTRL_1000MBPS_ON);
4900
4901                 tw32(MAC_LED_CTRL, led_ctrl);
4902                 udelay(40);
4903         }
4904
4905         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4906         if (tp->link_config.active_duplex == DUPLEX_HALF)
4907                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4908
4909         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4910                 if (current_link_up &&
4911                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4912                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4913                 else
4914                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4915         }
4916
4917         /* ??? Without this setting the Netgear GA302T PHY does not
4918          * ??? send/receive packets...
4919          */
4920         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4921             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4922                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4923                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4924                 udelay(80);
4925         }
4926
4927         tw32_f(MAC_MODE, tp->mac_mode);
4928         udelay(40);
4929
4930         tg3_phy_eee_adjust(tp, current_link_up);
4931
4932         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4933                 /* Polled via timer. */
4934                 tw32_f(MAC_EVENT, 0);
4935         } else {
4936                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4937         }
4938         udelay(40);
4939
4940         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4941             current_link_up &&
4942             tp->link_config.active_speed == SPEED_1000 &&
4943             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4944                 udelay(120);
4945                 tw32_f(MAC_STATUS,
4946                      (MAC_STATUS_SYNC_CHANGED |
4947                       MAC_STATUS_CFG_CHANGED));
4948                 udelay(40);
4949                 tg3_write_mem(tp,
4950                               NIC_SRAM_FIRMWARE_MBOX,
4951                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4952         }
4953
4954         /* Prevent send BD corruption. */
4955         if (tg3_flag(tp, CLKREQ_BUG)) {
4956                 if (tp->link_config.active_speed == SPEED_100 ||
4957                     tp->link_config.active_speed == SPEED_10)
4958                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4959                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4960                 else
4961                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4962                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4963         }
4964
4965         tg3_test_and_report_link_chg(tp, current_link_up);
4966
4967         return 0;
4968 }
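/* The CLKREQ workaround at the end of tg3_setup_copper_phy() uses the
 * kernel's pcie_capability_*_word() helpers; a sketch of the equivalent
 * open-coded read-modify-write, for reference:
 *
 *	u16 lnkctl;
 *
 *	pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
 *	lnkctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
 *	pcie_capability_write_word(tp->pdev, PCI_EXP_LNKCTL, lnkctl);
 */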
4969
4970 struct tg3_fiber_aneginfo {
4971         int state;
4972 #define ANEG_STATE_UNKNOWN              0
4973 #define ANEG_STATE_AN_ENABLE            1
4974 #define ANEG_STATE_RESTART_INIT         2
4975 #define ANEG_STATE_RESTART              3
4976 #define ANEG_STATE_DISABLE_LINK_OK      4
4977 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4978 #define ANEG_STATE_ABILITY_DETECT       6
4979 #define ANEG_STATE_ACK_DETECT_INIT      7
4980 #define ANEG_STATE_ACK_DETECT           8
4981 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4982 #define ANEG_STATE_COMPLETE_ACK         10
4983 #define ANEG_STATE_IDLE_DETECT_INIT     11
4984 #define ANEG_STATE_IDLE_DETECT          12
4985 #define ANEG_STATE_LINK_OK              13
4986 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4987 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4988
4989         u32 flags;
4990 #define MR_AN_ENABLE            0x00000001
4991 #define MR_RESTART_AN           0x00000002
4992 #define MR_AN_COMPLETE          0x00000004
4993 #define MR_PAGE_RX              0x00000008
4994 #define MR_NP_LOADED            0x00000010
4995 #define MR_TOGGLE_TX            0x00000020
4996 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4997 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4998 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4999 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5000 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5001 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5002 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5003 #define MR_TOGGLE_RX            0x00002000
5004 #define MR_NP_RX                0x00004000
5005
5006 #define MR_LINK_OK              0x80000000
5007
5008         unsigned long link_time, cur_time;
5009
5010         u32 ability_match_cfg;
5011         int ability_match_count;
5012
5013         char ability_match, idle_match, ack_match;
5014
5015         u32 txconfig, rxconfig;
5016 #define ANEG_CFG_NP             0x00000080
5017 #define ANEG_CFG_ACK            0x00000040
5018 #define ANEG_CFG_RF2            0x00000020
5019 #define ANEG_CFG_RF1            0x00000010
5020 #define ANEG_CFG_PS2            0x00000001
5021 #define ANEG_CFG_PS1            0x00008000
5022 #define ANEG_CFG_HD             0x00004000
5023 #define ANEG_CFG_FD             0x00002000
5024 #define ANEG_CFG_INVAL          0x00001f06
5025
5026 };
5027 #define ANEG_OK         0
5028 #define ANEG_DONE       1
5029 #define ANEG_TIMER_ENAB 2
5030 #define ANEG_FAILED     -1
5031
5032 #define ANEG_STATE_SETTLE_TIME  10000
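/* One ap->cur_time tick corresponds to one pass through the polling
 * loop in fiber_autoneg() below, which busy-waits udelay(1) per pass,
 * so a tick is roughly 1 us.  ANEG_STATE_SETTLE_TIME is therefore about
 * 10 ms of settle time against an overall budget of ~195 ms (the
 * 195000-pass bound in fiber_autoneg()).
 */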
5033
5034 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5035                                    struct tg3_fiber_aneginfo *ap)
5036 {
5037         u16 flowctrl;
5038         unsigned long delta;
5039         u32 rx_cfg_reg;
5040         int ret;
5041
5042         if (ap->state == ANEG_STATE_UNKNOWN) {
5043                 ap->rxconfig = 0;
5044                 ap->link_time = 0;
5045                 ap->cur_time = 0;
5046                 ap->ability_match_cfg = 0;
5047                 ap->ability_match_count = 0;
5048                 ap->ability_match = 0;
5049                 ap->idle_match = 0;
5050                 ap->ack_match = 0;
5051         }
5052         ap->cur_time++;
5053
5054         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5055                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5056
5057                 if (rx_cfg_reg != ap->ability_match_cfg) {
5058                         ap->ability_match_cfg = rx_cfg_reg;
5059                         ap->ability_match = 0;
5060                         ap->ability_match_count = 0;
5061                 } else {
5062                         if (++ap->ability_match_count > 1) {
5063                                 ap->ability_match = 1;
5064                                 ap->ability_match_cfg = rx_cfg_reg;
5065                         }
5066                 }
5067                 if (rx_cfg_reg & ANEG_CFG_ACK)
5068                         ap->ack_match = 1;
5069                 else
5070                         ap->ack_match = 0;
5071
5072                 ap->idle_match = 0;
5073         } else {
5074                 ap->idle_match = 1;
5075                 ap->ability_match_cfg = 0;
5076                 ap->ability_match_count = 0;
5077                 ap->ability_match = 0;
5078                 ap->ack_match = 0;
5079
5080                 rx_cfg_reg = 0;
5081         }
5082
5083         ap->rxconfig = rx_cfg_reg;
5084         ret = ANEG_OK;
5085
5086         switch (ap->state) {
5087         case ANEG_STATE_UNKNOWN:
5088                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5089                         ap->state = ANEG_STATE_AN_ENABLE;
5090
5091                 /* Fall through */
5092         case ANEG_STATE_AN_ENABLE:
5093                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5094                 if (ap->flags & MR_AN_ENABLE) {
5095                         ap->link_time = 0;
5096                         ap->cur_time = 0;
5097                         ap->ability_match_cfg = 0;
5098                         ap->ability_match_count = 0;
5099                         ap->ability_match = 0;
5100                         ap->idle_match = 0;
5101                         ap->ack_match = 0;
5102
5103                         ap->state = ANEG_STATE_RESTART_INIT;
5104                 } else {
5105                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5106                 }
5107                 break;
5108
5109         case ANEG_STATE_RESTART_INIT:
5110                 ap->link_time = ap->cur_time;
5111                 ap->flags &= ~(MR_NP_LOADED);
5112                 ap->txconfig = 0;
5113                 tw32(MAC_TX_AUTO_NEG, 0);
5114                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5115                 tw32_f(MAC_MODE, tp->mac_mode);
5116                 udelay(40);
5117
5118                 ret = ANEG_TIMER_ENAB;
5119                 ap->state = ANEG_STATE_RESTART;
5120
5121                 /* Fall through */
5122         case ANEG_STATE_RESTART:
5123                 delta = ap->cur_time - ap->link_time;
5124                 if (delta > ANEG_STATE_SETTLE_TIME)
5125                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5126                 else
5127                         ret = ANEG_TIMER_ENAB;
5128                 break;
5129
5130         case ANEG_STATE_DISABLE_LINK_OK:
5131                 ret = ANEG_DONE;
5132                 break;
5133
5134         case ANEG_STATE_ABILITY_DETECT_INIT:
5135                 ap->flags &= ~(MR_TOGGLE_TX);
5136                 ap->txconfig = ANEG_CFG_FD;
5137                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5138                 if (flowctrl & ADVERTISE_1000XPAUSE)
5139                         ap->txconfig |= ANEG_CFG_PS1;
5140                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5141                         ap->txconfig |= ANEG_CFG_PS2;
5142                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5143                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5144                 tw32_f(MAC_MODE, tp->mac_mode);
5145                 udelay(40);
5146
5147                 ap->state = ANEG_STATE_ABILITY_DETECT;
5148                 break;
5149
5150         case ANEG_STATE_ABILITY_DETECT:
5151                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5152                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5153                 break;
5154
5155         case ANEG_STATE_ACK_DETECT_INIT:
5156                 ap->txconfig |= ANEG_CFG_ACK;
5157                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5158                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5159                 tw32_f(MAC_MODE, tp->mac_mode);
5160                 udelay(40);
5161
5162                 ap->state = ANEG_STATE_ACK_DETECT;
5163
5164                 /* Fall through */
5165         case ANEG_STATE_ACK_DETECT:
5166                 if (ap->ack_match != 0) {
5167                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5168                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5169                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5170                         } else {
5171                                 ap->state = ANEG_STATE_AN_ENABLE;
5172                         }
5173                 } else if (ap->ability_match != 0 &&
5174                            ap->rxconfig == 0) {
5175                         ap->state = ANEG_STATE_AN_ENABLE;
5176                 }
5177                 break;
5178
5179         case ANEG_STATE_COMPLETE_ACK_INIT:
5180                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5181                         ret = ANEG_FAILED;
5182                         break;
5183                 }
5184                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5185                                MR_LP_ADV_HALF_DUPLEX |
5186                                MR_LP_ADV_SYM_PAUSE |
5187                                MR_LP_ADV_ASYM_PAUSE |
5188                                MR_LP_ADV_REMOTE_FAULT1 |
5189                                MR_LP_ADV_REMOTE_FAULT2 |
5190                                MR_LP_ADV_NEXT_PAGE |
5191                                MR_TOGGLE_RX |
5192                                MR_NP_RX);
5193                 if (ap->rxconfig & ANEG_CFG_FD)
5194                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5195                 if (ap->rxconfig & ANEG_CFG_HD)
5196                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5197                 if (ap->rxconfig & ANEG_CFG_PS1)
5198                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5199                 if (ap->rxconfig & ANEG_CFG_PS2)
5200                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5201                 if (ap->rxconfig & ANEG_CFG_RF1)
5202                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5203                 if (ap->rxconfig & ANEG_CFG_RF2)
5204                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5205                 if (ap->rxconfig & ANEG_CFG_NP)
5206                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5207
5208                 ap->link_time = ap->cur_time;
5209
5210                 ap->flags ^= (MR_TOGGLE_TX);
5211                 if (ap->rxconfig & 0x0008)
5212                         ap->flags |= MR_TOGGLE_RX;
5213                 if (ap->rxconfig & ANEG_CFG_NP)
5214                         ap->flags |= MR_NP_RX;
5215                 ap->flags |= MR_PAGE_RX;
5216
5217                 ap->state = ANEG_STATE_COMPLETE_ACK;
5218                 ret = ANEG_TIMER_ENAB;
5219                 break;
5220
5221         case ANEG_STATE_COMPLETE_ACK:
5222                 if (ap->ability_match != 0 &&
5223                     ap->rxconfig == 0) {
5224                         ap->state = ANEG_STATE_AN_ENABLE;
5225                         break;
5226                 }
5227                 delta = ap->cur_time - ap->link_time;
5228                 if (delta > ANEG_STATE_SETTLE_TIME) {
5229                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5230                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5231                         } else {
5232                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5233                                     !(ap->flags & MR_NP_RX)) {
5234                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5235                                 } else {
5236                                         ret = ANEG_FAILED;
5237                                 }
5238                         }
5239                 }
5240                 break;
5241
5242         case ANEG_STATE_IDLE_DETECT_INIT:
5243                 ap->link_time = ap->cur_time;
5244                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5245                 tw32_f(MAC_MODE, tp->mac_mode);
5246                 udelay(40);
5247
5248                 ap->state = ANEG_STATE_IDLE_DETECT;
5249                 ret = ANEG_TIMER_ENAB;
5250                 break;
5251
5252         case ANEG_STATE_IDLE_DETECT:
5253                 if (ap->ability_match != 0 &&
5254                     ap->rxconfig == 0) {
5255                         ap->state = ANEG_STATE_AN_ENABLE;
5256                         break;
5257                 }
5258                 delta = ap->cur_time - ap->link_time;
5259                 if (delta > ANEG_STATE_SETTLE_TIME) {
5260                         /* XXX as in the Broadcom driver: assume link OK after the settle time */
5261                         ap->state = ANEG_STATE_LINK_OK;
5262                 }
5263                 break;
5264
5265         case ANEG_STATE_LINK_OK:
5266                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5267                 ret = ANEG_DONE;
5268                 break;
5269
5270         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5271                 /* ??? unimplemented */
5272                 break;
5273
5274         case ANEG_STATE_NEXT_PAGE_WAIT:
5275                 /* ??? unimplemented */
5276                 break;
5277
5278         default:
5279                 ret = ANEG_FAILED;
5280                 break;
5281         }
5282
5283         return ret;
5284 }
5285
5286 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5287 {
5288         int res = 0;
5289         struct tg3_fiber_aneginfo aninfo;
5290         int status = ANEG_FAILED;
5291         unsigned int tick;
5292         u32 tmp;
5293
5294         tw32_f(MAC_TX_AUTO_NEG, 0);
5295
5296         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5297         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5298         udelay(40);
5299
5300         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5301         udelay(40);
5302
5303         memset(&aninfo, 0, sizeof(aninfo));
5304         aninfo.flags |= MR_AN_ENABLE;
5305         aninfo.state = ANEG_STATE_UNKNOWN;
5306         aninfo.cur_time = 0;
5307         tick = 0;
5308         while (++tick < 195000) {
5309                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5310                 if (status == ANEG_DONE || status == ANEG_FAILED)
5311                         break;
5312
5313                 udelay(1);
5314         }
5315
5316         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5317         tw32_f(MAC_MODE, tp->mac_mode);
5318         udelay(40);
5319
5320         *txflags = aninfo.txconfig;
5321         *rxflags = aninfo.flags;
5322
5323         if (status == ANEG_DONE &&
5324             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5325                              MR_LP_ADV_FULL_DUPLEX)))
5326                 res = 1;
5327
5328         return res;
5329 }
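/* Typical use of the result (see tg3_setup_fiber_by_hand() below):
 * txflags returns the final transmitted config word and rxflags the
 * MR_* link-partner flags, from which the 1000BASE-X pause bits are
 * recovered:
 *
 *	u32 local_adv = 0, remote_adv = 0;
 *
 *	if (txflags & ANEG_CFG_PS1)
 *		local_adv |= ADVERTISE_1000XPAUSE;
 *	if (rxflags & MR_LP_ADV_SYM_PAUSE)
 *		remote_adv |= LPA_1000XPAUSE;
 *	tg3_setup_flow_control(tp, local_adv, remote_adv);
 */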
5330
5331 static void tg3_init_bcm8002(struct tg3 *tp)
5332 {
5333         u32 mac_status = tr32(MAC_STATUS);
5334         int i;
5335
5336         /* Reset when initializing the first time or when we have a link. */
5337         if (tg3_flag(tp, INIT_COMPLETE) &&
5338             !(mac_status & MAC_STATUS_PCS_SYNCED))
5339                 return;
5340
5341         /* Set PLL lock range. */
5342         tg3_writephy(tp, 0x16, 0x8007);
5343
5344         /* SW reset */
5345         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5346
5347         /* Wait for reset to complete. */
5348         /* XXX schedule_timeout() ... */
5349         for (i = 0; i < 500; i++)
5350                 udelay(10);
5351
5352         /* Config mode; select PMA/Ch 1 regs. */
5353         tg3_writephy(tp, 0x10, 0x8411);
5354
5355         /* Enable auto-lock and comdet, select txclk for tx. */
5356         tg3_writephy(tp, 0x11, 0x0a10);
5357
5358         tg3_writephy(tp, 0x18, 0x00a0);
5359         tg3_writephy(tp, 0x16, 0x41ff);
5360
5361         /* Assert and deassert POR. */
5362         tg3_writephy(tp, 0x13, 0x0400);
5363         udelay(40);
5364         tg3_writephy(tp, 0x13, 0x0000);
5365
5366         tg3_writephy(tp, 0x11, 0x0a50);
5367         udelay(40);
5368         tg3_writephy(tp, 0x11, 0x0a10);
5369
5370         /* Wait for signal to stabilize */
5371         /* XXX schedule_timeout() ... */
5372         for (i = 0; i < 15000; i++)
5373                 udelay(10);
5374
5375         /* Deselect the channel register so we can read the PHYID
5376          * later.
5377          */
5378         tg3_writephy(tp, 0x10, 0x8011);
5379 }
5380
5381 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5382 {
5383         u16 flowctrl;
5384         bool current_link_up;
5385         u32 sg_dig_ctrl, sg_dig_status;
5386         u32 serdes_cfg, expected_sg_dig_ctrl;
5387         int workaround, port_a;
5388
5389         serdes_cfg = 0;
5390         expected_sg_dig_ctrl = 0;
5391         workaround = 0;
5392         port_a = 1;
5393         current_link_up = false;
5394
5395         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5396             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5397                 workaround = 1;
5398                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5399                         port_a = 0;
5400
5401                 /* Preserve bits 0-11,13,14 for signal pre-emphasis
5402                  * and bits 20-23 for the voltage regulator. */
5403                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5404         }
5405
5406         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5407
5408         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5409                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5410                         if (workaround) {
5411                                 u32 val = serdes_cfg;
5412
5413                                 if (port_a)
5414                                         val |= 0xc010000;
5415                                 else
5416                                         val |= 0x4010000;
5417                                 tw32_f(MAC_SERDES_CFG, val);
5418                         }
5419
5420                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5421                 }
5422                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5423                         tg3_setup_flow_control(tp, 0, 0);
5424                         current_link_up = true;
5425                 }
5426                 goto out;
5427         }
5428
5429         /* Want auto-negotiation.  */
5430         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5431
5432         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5433         if (flowctrl & ADVERTISE_1000XPAUSE)
5434                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5435         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5436                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5437
5438         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5439                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5440                     tp->serdes_counter &&
5441                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5442                                     MAC_STATUS_RCVD_CFG)) ==
5443                      MAC_STATUS_PCS_SYNCED)) {
5444                         tp->serdes_counter--;
5445                         current_link_up = true;
5446                         goto out;
5447                 }
5448 restart_autoneg:
5449                 if (workaround)
5450                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5451                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5452                 udelay(5);
5453                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5454
5455                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5456                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5457         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5458                                  MAC_STATUS_SIGNAL_DET)) {
5459                 sg_dig_status = tr32(SG_DIG_STATUS);
5460                 mac_status = tr32(MAC_STATUS);
5461
5462                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5463                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5464                         u32 local_adv = 0, remote_adv = 0;
5465
5466                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5467                                 local_adv |= ADVERTISE_1000XPAUSE;
5468                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5469                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5470
5471                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5472                                 remote_adv |= LPA_1000XPAUSE;
5473                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5474                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5475
5476                         tp->link_config.rmt_adv =
5477                                            mii_adv_to_ethtool_adv_x(remote_adv);
5478
5479                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5480                         current_link_up = true;
5481                         tp->serdes_counter = 0;
5482                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5483                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5484                         if (tp->serdes_counter)
5485                                 tp->serdes_counter--;
5486                         else {
5487                                 if (workaround) {
5488                                         u32 val = serdes_cfg;
5489
5490                                         if (port_a)
5491                                                 val |= 0xc010000;
5492                                         else
5493                                                 val |= 0x4010000;
5494
5495                                         tw32_f(MAC_SERDES_CFG, val);
5496                                 }
5497
5498                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5499                                 udelay(40);
5500
5501                                 /* Link parallel detection - link is up
5502                                  * only if we have PCS_SYNC and are not
5503                                  * receiving config code words. */
5504                                 mac_status = tr32(MAC_STATUS);
5505                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5506                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5507                                         tg3_setup_flow_control(tp, 0, 0);
5508                                         current_link_up = true;
5509                                         tp->phy_flags |=
5510                                                 TG3_PHYFLG_PARALLEL_DETECT;
5511                                         tp->serdes_counter =
5512                                                 SERDES_PARALLEL_DET_TIMEOUT;
5513                                 } else
5514                                         goto restart_autoneg;
5515                         }
5516                 }
5517         } else {
5518                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5519                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5520         }
5521
5522 out:
5523         return current_link_up;
5524 }
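/* "Parallel detection" above is the 802.3 fallback for a link partner
 * that is not autonegotiating: if the SERDES reports PCS sync while no
 * config code words arrive, the link is taken up via
 * tg3_setup_flow_control(tp, 0, 0) (no negotiated pause) and
 * TG3_PHYFLG_PARALLEL_DETECT is set so later polls can tell the two
 * cases apart.
 */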
5525
5526 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5527 {
5528         bool current_link_up = false;
5529
5530         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5531                 goto out;
5532
5533         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5534                 u32 txflags, rxflags;
5535                 int i;
5536
5537                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5538                         u32 local_adv = 0, remote_adv = 0;
5539
5540                         if (txflags & ANEG_CFG_PS1)
5541                                 local_adv |= ADVERTISE_1000XPAUSE;
5542                         if (txflags & ANEG_CFG_PS2)
5543                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5544
5545                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5546                                 remote_adv |= LPA_1000XPAUSE;
5547                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5548                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5549
5550                         tp->link_config.rmt_adv =
5551                                            mii_adv_to_ethtool_adv_x(remote_adv);
5552
5553                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5554
5555                         current_link_up = true;
5556                 }
5557                 for (i = 0; i < 30; i++) {
5558                         udelay(20);
5559                         tw32_f(MAC_STATUS,
5560                                (MAC_STATUS_SYNC_CHANGED |
5561                                 MAC_STATUS_CFG_CHANGED));
5562                         udelay(40);
5563                         if ((tr32(MAC_STATUS) &
5564                              (MAC_STATUS_SYNC_CHANGED |
5565                               MAC_STATUS_CFG_CHANGED)) == 0)
5566                                 break;
5567                 }
5568
5569                 mac_status = tr32(MAC_STATUS);
5570                 if (!current_link_up &&
5571                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5572                     !(mac_status & MAC_STATUS_RCVD_CFG))
5573                         current_link_up = true;
5574         } else {
5575                 tg3_setup_flow_control(tp, 0, 0);
5576
5577                 /* Forcing 1000FD link up. */
5578                 current_link_up = true;
5579
5580                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5581                 udelay(40);
5582
5583                 tw32_f(MAC_MODE, tp->mac_mode);
5584                 udelay(40);
5585         }
5586
5587 out:
5588         return current_link_up;
5589 }
5590
5591 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5592 {
5593         u32 orig_pause_cfg;
5594         u16 orig_active_speed;
5595         u8 orig_active_duplex;
5596         u32 mac_status;
5597         bool current_link_up;
5598         int i;
5599
5600         orig_pause_cfg = tp->link_config.active_flowctrl;
5601         orig_active_speed = tp->link_config.active_speed;
5602         orig_active_duplex = tp->link_config.active_duplex;
5603
5604         if (!tg3_flag(tp, HW_AUTONEG) &&
5605             tp->link_up &&
5606             tg3_flag(tp, INIT_COMPLETE)) {
5607                 mac_status = tr32(MAC_STATUS);
5608                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5609                                MAC_STATUS_SIGNAL_DET |
5610                                MAC_STATUS_CFG_CHANGED |
5611                                MAC_STATUS_RCVD_CFG);
5612                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5613                                    MAC_STATUS_SIGNAL_DET)) {
5614                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5615                                             MAC_STATUS_CFG_CHANGED));
5616                         return 0;
5617                 }
5618         }
5619
5620         tw32_f(MAC_TX_AUTO_NEG, 0);
5621
5622         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5623         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5624         tw32_f(MAC_MODE, tp->mac_mode);
5625         udelay(40);
5626
5627         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5628                 tg3_init_bcm8002(tp);
5629
5630         /* Enable link change events even when polling the serdes.  */
5631         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5632         udelay(40);
5633
5634         current_link_up = false;
5635         tp->link_config.rmt_adv = 0;
5636         mac_status = tr32(MAC_STATUS);
5637
5638         if (tg3_flag(tp, HW_AUTONEG))
5639                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5640         else
5641                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5642
5643         tp->napi[0].hw_status->status =
5644                 (SD_STATUS_UPDATED |
5645                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5646
5647         for (i = 0; i < 100; i++) {
5648                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5649                                     MAC_STATUS_CFG_CHANGED));
5650                 udelay(5);
5651                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5652                                          MAC_STATUS_CFG_CHANGED |
5653                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5654                         break;
5655         }
5656
5657         mac_status = tr32(MAC_STATUS);
5658         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5659                 current_link_up = false;
5660                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5661                     tp->serdes_counter == 0) {
5662                         tw32_f(MAC_MODE, (tp->mac_mode |
5663                                           MAC_MODE_SEND_CONFIGS));
5664                         udelay(1);
5665                         tw32_f(MAC_MODE, tp->mac_mode);
5666                 }
5667         }
5668
5669         if (current_link_up) {
5670                 tp->link_config.active_speed = SPEED_1000;
5671                 tp->link_config.active_duplex = DUPLEX_FULL;
5672                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5673                                     LED_CTRL_LNKLED_OVERRIDE |
5674                                     LED_CTRL_1000MBPS_ON));
5675         } else {
5676                 tp->link_config.active_speed = SPEED_UNKNOWN;
5677                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5678                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5679                                     LED_CTRL_LNKLED_OVERRIDE |
5680                                     LED_CTRL_TRAFFIC_OVERRIDE));
5681         }
5682
5683         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5684                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5685                 if (orig_pause_cfg != now_pause_cfg ||
5686                     orig_active_speed != tp->link_config.active_speed ||
5687                     orig_active_duplex != tp->link_config.active_duplex)
5688                         tg3_link_report(tp);
5689         }
5690
5691         return 0;
5692 }
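/* Unlike the copper path, the fiber path ends with an extra comparison
 * against the pre-setup state: even when the carrier state is unchanged,
 * tg3_link_report() is still called if the negotiated flow control,
 * speed or duplex moved, so user-visible link reporting tracks the
 * hardware.
 */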
5693
5694 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5695 {
5696         int err = 0;
5697         u32 bmsr, bmcr;
5698         u16 current_speed = SPEED_UNKNOWN;
5699         u8 current_duplex = DUPLEX_UNKNOWN;
5700         bool current_link_up = false;
5701         u32 local_adv, remote_adv, sgsr;
5702
5703         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5704              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5705              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5706              (sgsr & SERDES_TG3_SGMII_MODE)) {
5707
5708                 if (force_reset)
5709                         tg3_phy_reset(tp);
5710
5711                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5712
5713                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5714                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5715                 } else {
5716                         current_link_up = true;
5717                         if (sgsr & SERDES_TG3_SPEED_1000) {
5718                                 current_speed = SPEED_1000;
5719                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5720                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5721                                 current_speed = SPEED_100;
5722                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5723                         } else {
5724                                 current_speed = SPEED_10;
5725                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5726                         }
5727
5728                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5729                                 current_duplex = DUPLEX_FULL;
5730                         else
5731                                 current_duplex = DUPLEX_HALF;
5732                 }
5733
5734                 tw32_f(MAC_MODE, tp->mac_mode);
5735                 udelay(40);
5736
5737                 tg3_clear_mac_status(tp);
5738
5739                 goto fiber_setup_done;
5740         }
5741
5742         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5743         tw32_f(MAC_MODE, tp->mac_mode);
5744         udelay(40);
5745
5746         tg3_clear_mac_status(tp);
5747
5748         if (force_reset)
5749                 tg3_phy_reset(tp);
5750
5751         tp->link_config.rmt_adv = 0;
5752
5753         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5754         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5755         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5756                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5757                         bmsr |= BMSR_LSTATUS;
5758                 else
5759                         bmsr &= ~BMSR_LSTATUS;
5760         }
5761
5762         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5763
5764         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5765             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5766                 /* do nothing, just check for link up at the end */
5767         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5768                 u32 adv, newadv;
5769
5770                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5771                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5772                                  ADVERTISE_1000XPAUSE |
5773                                  ADVERTISE_1000XPSE_ASYM |
5774                                  ADVERTISE_SLCT);
5775
5776                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5777                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5778
5779                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5780                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5781                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5782                         tg3_writephy(tp, MII_BMCR, bmcr);
5783
5784                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5785                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5786                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5787
5788                         return err;
5789                 }
5790         } else {
5791                 u32 new_bmcr;
5792
5793                 bmcr &= ~BMCR_SPEED1000;
5794                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5795
5796                 if (tp->link_config.duplex == DUPLEX_FULL)
5797                         new_bmcr |= BMCR_FULLDPLX;
5798
5799                 if (new_bmcr != bmcr) {
5800                         /* BMCR_SPEED1000 is a reserved bit that needs
5801                          * to be set on write.
5802                          */
5803                         new_bmcr |= BMCR_SPEED1000;
5804
5805                         /* Force a linkdown */
5806                         if (tp->link_up) {
5807                                 u32 adv;
5808
5809                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5810                                 adv &= ~(ADVERTISE_1000XFULL |
5811                                          ADVERTISE_1000XHALF |
5812                                          ADVERTISE_SLCT);
5813                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5814                                 tg3_writephy(tp, MII_BMCR, bmcr |
5815                                                            BMCR_ANRESTART |
5816                                                            BMCR_ANENABLE);
5817                                 udelay(10);
5818                                 tg3_carrier_off(tp);
5819                         }
5820                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5821                         bmcr = new_bmcr;
5822                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5823                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5824                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5825                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5826                                         bmsr |= BMSR_LSTATUS;
5827                                 else
5828                                         bmsr &= ~BMSR_LSTATUS;
5829                         }
5830                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5831                 }
5832         }
5833
5834         if (bmsr & BMSR_LSTATUS) {
5835                 current_speed = SPEED_1000;
5836                 current_link_up = true;
5837                 if (bmcr & BMCR_FULLDPLX)
5838                         current_duplex = DUPLEX_FULL;
5839                 else
5840                         current_duplex = DUPLEX_HALF;
5841
5842                 local_adv = 0;
5843                 remote_adv = 0;
5844
5845                 if (bmcr & BMCR_ANENABLE) {
5846                         u32 common;
5847
5848                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5849                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5850                         common = local_adv & remote_adv;
5851                         if (common & (ADVERTISE_1000XHALF |
5852                                       ADVERTISE_1000XFULL)) {
5853                                 if (common & ADVERTISE_1000XFULL)
5854                                         current_duplex = DUPLEX_FULL;
5855                                 else
5856                                         current_duplex = DUPLEX_HALF;
5857
5858                                 tp->link_config.rmt_adv =
5859                                            mii_adv_to_ethtool_adv_x(remote_adv);
5860                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5861                                 /* Link is up via parallel detect */
5862                         } else {
5863                                 current_link_up = false;
5864                         }
5865                 }
5866         }
5867
5868 fiber_setup_done:
5869         if (current_link_up && current_duplex == DUPLEX_FULL)
5870                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5871
5872         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5873         if (tp->link_config.active_duplex == DUPLEX_HALF)
5874                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5875
5876         tw32_f(MAC_MODE, tp->mac_mode);
5877         udelay(40);
5878
5879         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5880
5881         tp->link_config.active_speed = current_speed;
5882         tp->link_config.active_duplex = current_duplex;
5883
5884         tg3_test_and_report_link_chg(tp, current_link_up);
5885         return err;
5886 }
5887
5888 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5889 {
5890         if (tp->serdes_counter) {
5891                 /* Give autoneg time to complete. */
5892                 tp->serdes_counter--;
5893                 return;
5894         }
5895
5896         if (!tp->link_up &&
5897             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5898                 u32 bmcr;
5899
5900                 tg3_readphy(tp, MII_BMCR, &bmcr);
5901                 if (bmcr & BMCR_ANENABLE) {
5902                         u32 phy1, phy2;
5903
5904                         /* Select shadow register 0x1f */
5905                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5906                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5907
5908                         /* Select expansion interrupt status register */
5909                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5910                                          MII_TG3_DSP_EXP1_INT_STAT);
5911                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5912                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5913
5914                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5915                                 /* We have signal detect and are not
5916                                  * receiving config code words, so the link
5917                                  * is up by parallel detection.
5918                                  */
5919
5920                                 bmcr &= ~BMCR_ANENABLE;
5921                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5922                                 tg3_writephy(tp, MII_BMCR, bmcr);
5923                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5924                         }
5925                 }
5926         } else if (tp->link_up &&
5927                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5928                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5929                 u32 phy2;
5930
5931                 /* Select expansion interrupt status register */
5932                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5933                                  MII_TG3_DSP_EXP1_INT_STAT);
5934                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5935                 if (phy2 & 0x20) {
5936                         u32 bmcr;
5937
5938                         /* Config code words received, turn on autoneg. */
5939                         tg3_readphy(tp, MII_BMCR, &bmcr);
5940                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5941
5942                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5943
5944                 }
5945         }
5946 }
5947
5948 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5949 {
5950         u32 val;
5951         int err;
5952
5953         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5954                 err = tg3_setup_fiber_phy(tp, force_reset);
5955         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5956                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5957         else
5958                 err = tg3_setup_copper_phy(tp, force_reset);
5959
5960         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5961                 u32 scale;
5962
5963                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5964                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5965                         scale = 65;
5966                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5967                         scale = 6;
5968                 else
5969                         scale = 12;
5970
5971                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5972                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5973                 tw32(GRC_MISC_CFG, val);
5974         }
5975
5976         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5977               (6 << TX_LENGTHS_IPG_SHIFT);
5978         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5979             tg3_asic_rev(tp) == ASIC_REV_5762)
5980                 val |= tr32(MAC_TX_LENGTHS) &
5981                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5982                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5983
5984         if (tp->link_config.active_speed == SPEED_1000 &&
5985             tp->link_config.active_duplex == DUPLEX_HALF)
5986                 tw32(MAC_TX_LENGTHS, val |
5987                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5988         else
5989                 tw32(MAC_TX_LENGTHS, val |
5990                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5991
5992         if (!tg3_flag(tp, 5705_PLUS)) {
5993                 if (tp->link_up) {
5994                         tw32(HOSTCC_STAT_COAL_TICKS,
5995                              tp->coal.stats_block_coalesce_usecs);
5996                 } else {
5997                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5998                 }
5999         }
6000
6001         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6002                 val = tr32(PCIE_PWR_MGMT_THRESH);
6003                 if (!tp->link_up)
6004                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6005                               tp->pwrmgmt_thresh;
6006                 else
6007                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6008                 tw32(PCIE_PWR_MGMT_THRESH, val);
6009         }
6010
6011         return err;
6012 }
6013
6014 /* tp->lock must be held */
6015 static u64 tg3_refclk_read(struct tg3 *tp)
6016 {
6017         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6018         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6019 }
6020
6021 /* tp->lock must be held */
6022 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6023 {
6024         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6025         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6026         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6027         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6028 }
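
/* Example (illustrative only): splitting a 64-bit nanosecond value across
 * the two 32-bit EAV registers above.  For newval = 0x0000000123456789ULL:
 *
 *     newval & 0xffffffff = 0x23456789  ->  TG3_EAV_REF_CLCK_LSB
 *     newval >> 32        = 0x00000001  ->  TG3_EAV_REF_CLCK_MSB
 *
 * tg3_refclk_read() recombines the halves the same way:
 *     stamp = lsb | ((u64)msb << 32)
 */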
6029
6030 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6031 static inline void tg3_full_unlock(struct tg3 *tp);
6032 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6033 {
6034         struct tg3 *tp = netdev_priv(dev);
6035
6036         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6037                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6038                                 SOF_TIMESTAMPING_SOFTWARE;
6039
6040         if (tg3_flag(tp, PTP_CAPABLE)) {
6041                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6042                                         SOF_TIMESTAMPING_RX_HARDWARE |
6043                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6044         }
6045
6046         if (tp->ptp_clock)
6047                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6048         else
6049                 info->phc_index = -1;
6050
6051         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6052
6053         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6054                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6055                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6056                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6057         return 0;
6058 }
6059
6060 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6061 {
6062         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6063         bool neg_adj = false;
6064         u32 correction = 0;
6065
6066         if (ppb < 0) {
6067                 neg_adj = true;
6068                 ppb = -ppb;
6069         }
6070
6071         /* Frequency adjustment is performed in hardware with a 24-bit
6072          * accumulator and a programmable correction value. On each clock
6073          * cycle, the correction value is added to the accumulator and,
6074          * when it overflows, the time counter is incremented/decremented.
6075          *
6076          * So the conversion from ppb to a correction value is
6077          *              ppb * (1 << 24) / 1000000000
6078          */
6079         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6080                      TG3_EAV_REF_CLK_CORRECT_MASK;
6081
6082         tg3_full_lock(tp, 0);
6083
6084         if (correction)
6085                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6086                      TG3_EAV_REF_CLK_CORRECT_EN |
6087                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6088         else
6089                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6090
6091         tg3_full_unlock(tp);
6092
6093         return 0;
6094 }
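
/* Worked example of the ppb conversion above (illustrative only): for an
 * adjustment of ppb = 1000 (i.e. 1 ppm),
 *
 *     correction = 1000 * (1 << 24) / 1000000000
 *                = 16777216000 / 1000000000
 *                = 16 (truncated by div_u64)
 *
 * so 16 is added to the 24-bit accumulator on every clock, overflowing it
 * roughly once per 2^24 / 16 clocks, i.e. about 1 ppm of adjustment.
 */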
6095
6096 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6097 {
6098         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6099
6100         tg3_full_lock(tp, 0);
6101         tp->ptp_adjust += delta;
6102         tg3_full_unlock(tp);
6103
6104         return 0;
6105 }
6106
6107 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6108 {
6109         u64 ns;
6110         u32 remainder;
6111         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6112
6113         tg3_full_lock(tp, 0);
6114         ns = tg3_refclk_read(tp);
6115         ns += tp->ptp_adjust;
6116         tg3_full_unlock(tp);
6117
6118         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6119         ts->tv_nsec = remainder;
6120
6121         return 0;
6122 }
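
/* Example (illustrative only): for ns = 3500000123, the div_u64_rem()
 * above yields tv_sec = 3 and tv_nsec = 500000123, i.e. 3.500000123s.
 */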
6123
6124 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6125                            const struct timespec *ts)
6126 {
6127         u64 ns;
6128         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6129
6130         ns = timespec_to_ns(ts);
6131
6132         tg3_full_lock(tp, 0);
6133         tg3_refclk_write(tp, ns);
6134         tp->ptp_adjust = 0;
6135         tg3_full_unlock(tp);
6136
6137         return 0;
6138 }
6139
6140 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6141                           struct ptp_clock_request *rq, int on)
6142 {
6143         return -EOPNOTSUPP;
6144 }
6145
6146 static const struct ptp_clock_info tg3_ptp_caps = {
6147         .owner          = THIS_MODULE,
6148         .name           = "tg3 clock",
6149         .max_adj        = 250000000,
6150         .n_alarm        = 0,
6151         .n_ext_ts       = 0,
6152         .n_per_out      = 0,
6153         .pps            = 0,
6154         .adjfreq        = tg3_ptp_adjfreq,
6155         .adjtime        = tg3_ptp_adjtime,
6156         .gettime        = tg3_ptp_gettime,
6157         .settime        = tg3_ptp_settime,
6158         .enable         = tg3_ptp_enable,
6159 };
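
/* A minimal sketch of how an ops table like the above is handed to the
 * PTP core (this driver performs the actual registration elsewhere; the
 * exact call site and error handling here are illustrative assumptions):
 *
 *     tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *     if (IS_ERR(tp->ptp_clock))
 *             tp->ptp_clock = NULL;
 */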
6160
6161 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6162                                      struct skb_shared_hwtstamps *timestamp)
6163 {
6164         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6165         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6166                                            tp->ptp_adjust);
6167 }
6168
6169 /* tp->lock must be held */
6170 static void tg3_ptp_init(struct tg3 *tp)
6171 {
6172         if (!tg3_flag(tp, PTP_CAPABLE))
6173                 return;
6174
6175         /* Initialize the hardware clock to the system time. */
6176         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6177         tp->ptp_adjust = 0;
6178         tp->ptp_info = tg3_ptp_caps;
6179 }
6180
6181 /* tp->lock must be held */
6182 static void tg3_ptp_resume(struct tg3 *tp)
6183 {
6184         if (!tg3_flag(tp, PTP_CAPABLE))
6185                 return;
6186
6187         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6188         tp->ptp_adjust = 0;
6189 }
6190
6191 static void tg3_ptp_fini(struct tg3 *tp)
6192 {
6193         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6194                 return;
6195
6196         ptp_clock_unregister(tp->ptp_clock);
6197         tp->ptp_clock = NULL;
6198         tp->ptp_adjust = 0;
6199 }
6200
6201 static inline int tg3_irq_sync(struct tg3 *tp)
6202 {
6203         return tp->irq_sync;
6204 }
6205
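/* Copy @len bytes of device registers, starting at register offset @off,
 * into @dst at the same byte offset.  The initial pointer bump means @dst
 * is indexed by register offset, so a call such as
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x10) (length hypothetical) fills
 * regs[MAC_MODE / sizeof(u32)] onward.
 */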
6206 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6207 {
6208         int i;
6209
6210         dst = (u32 *)((u8 *)dst + off);
6211         for (i = 0; i < len; i += sizeof(u32))
6212                 *dst++ = tr32(off + i);
6213 }
6214
6215 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6216 {
6217         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6218         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6219         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6220         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6221         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6222         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6223         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6224         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6225         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6226         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6227         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6228         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6229         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6230         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6231         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6232         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6233         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6234         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6235         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6236
6237         if (tg3_flag(tp, SUPPORT_MSIX))
6238                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6239
6240         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6241         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6242         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6243         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6244         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6245         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6246         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6247         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6248
6249         if (!tg3_flag(tp, 5705_PLUS)) {
6250                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6251                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6252                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6253         }
6254
6255         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6256         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6257         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6258         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6259         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6260
6261         if (tg3_flag(tp, NVRAM))
6262                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6263 }
6264
6265 static void tg3_dump_state(struct tg3 *tp)
6266 {
6267         int i;
6268         u32 *regs;
6269
6270         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6271         if (!regs)
6272                 return;
6273
6274         if (tg3_flag(tp, PCI_EXPRESS)) {
6275                 /* Read up to but not including private PCI registers */
6276                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6277                         regs[i / sizeof(u32)] = tr32(i);
6278         } else
6279                 tg3_dump_legacy_regs(tp, regs);
6280
6281         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6282                 if (!regs[i + 0] && !regs[i + 1] &&
6283                     !regs[i + 2] && !regs[i + 3])
6284                         continue;
6285
6286                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6287                            i * 4,
6288                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6289         }
6290
6291         kfree(regs);
6292
6293         for (i = 0; i < tp->irq_cnt; i++) {
6294                 struct tg3_napi *tnapi = &tp->napi[i];
6295
6296                 /* SW status block */
6297                 netdev_err(tp->dev,
6298                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6299                            i,
6300                            tnapi->hw_status->status,
6301                            tnapi->hw_status->status_tag,
6302                            tnapi->hw_status->rx_jumbo_consumer,
6303                            tnapi->hw_status->rx_consumer,
6304                            tnapi->hw_status->rx_mini_consumer,
6305                            tnapi->hw_status->idx[0].rx_producer,
6306                            tnapi->hw_status->idx[0].tx_consumer);
6307
6308                 netdev_err(tp->dev,
6309                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6310                            i,
6311                            tnapi->last_tag, tnapi->last_irq_tag,
6312                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6313                            tnapi->rx_rcb_ptr,
6314                            tnapi->prodring.rx_std_prod_idx,
6315                            tnapi->prodring.rx_std_cons_idx,
6316                            tnapi->prodring.rx_jmb_prod_idx,
6317                            tnapi->prodring.rx_jmb_cons_idx);
6318         }
6319 }
6320
6321 /* This is called whenever we suspect that the system chipset is re-
6322  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6323  * is bogus tx completions. We try to recover by setting the
6324  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6325  * in the workqueue.
6326  */
6327 static void tg3_tx_recover(struct tg3 *tp)
6328 {
6329         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6330                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6331
6332         netdev_warn(tp->dev,
6333                     "The system may be re-ordering memory-mapped I/O "
6334                     "cycles to the network device, attempting to recover. "
6335                     "Please report the problem to the driver maintainer "
6336                     "and include system chipset information.\n");
6337
6338         spin_lock(&tp->lock);
6339         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6340         spin_unlock(&tp->lock);
6341 }
6342
6343 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6344 {
6345         /* Tell compiler to fetch tx indices from memory. */
6346         barrier();
6347         return tnapi->tx_pending -
6348                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6349 }
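
/* Worked example of the ring arithmetic above (illustrative values,
 * assuming TG3_TX_RING_SIZE = 512): with tx_prod = 10 and tx_cons = 500,
 *
 *     (10 - 500) & 511 = 22
 *
 * descriptors are in flight, so 22 is subtracted from tx_pending; the
 * mask handles the producer index having wrapped past the consumer.
 */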
6350
6351 /* Tigon3 never reports partial packet sends.  So we do not
6352  * need special logic to handle SKBs that have not had all
6353  * of their frags sent yet, like SunGEM does.
6354  */
6355 static void tg3_tx(struct tg3_napi *tnapi)
6356 {
6357         struct tg3 *tp = tnapi->tp;
6358         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6359         u32 sw_idx = tnapi->tx_cons;
6360         struct netdev_queue *txq;
6361         int index = tnapi - tp->napi;
6362         unsigned int pkts_compl = 0, bytes_compl = 0;
6363
6364         if (tg3_flag(tp, ENABLE_TSS))
6365                 index--;
6366
6367         txq = netdev_get_tx_queue(tp->dev, index);
6368
6369         while (sw_idx != hw_idx) {
6370                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6371                 struct sk_buff *skb = ri->skb;
6372                 int i, tx_bug = 0;
6373
6374                 if (unlikely(skb == NULL)) {
6375                         tg3_tx_recover(tp);
6376                         return;
6377                 }
6378
6379                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6380                         struct skb_shared_hwtstamps timestamp;
6381                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6382                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6383
6384                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6385
6386                         skb_tstamp_tx(skb, &timestamp);
6387                 }
6388
6389                 pci_unmap_single(tp->pdev,
6390                                  dma_unmap_addr(ri, mapping),
6391                                  skb_headlen(skb),
6392                                  PCI_DMA_TODEVICE);
6393
6394                 ri->skb = NULL;
6395
6396                 while (ri->fragmented) {
6397                         ri->fragmented = false;
6398                         sw_idx = NEXT_TX(sw_idx);
6399                         ri = &tnapi->tx_buffers[sw_idx];
6400                 }
6401
6402                 sw_idx = NEXT_TX(sw_idx);
6403
6404                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6405                         ri = &tnapi->tx_buffers[sw_idx];
6406                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6407                                 tx_bug = 1;
6408
6409                         pci_unmap_page(tp->pdev,
6410                                        dma_unmap_addr(ri, mapping),
6411                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6412                                        PCI_DMA_TODEVICE);
6413
6414                         while (ri->fragmented) {
6415                                 ri->fragmented = false;
6416                                 sw_idx = NEXT_TX(sw_idx);
6417                                 ri = &tnapi->tx_buffers[sw_idx];
6418                         }
6419
6420                         sw_idx = NEXT_TX(sw_idx);
6421                 }
6422
6423                 pkts_compl++;
6424                 bytes_compl += skb->len;
6425
6426                 dev_kfree_skb(skb);
6427
6428                 if (unlikely(tx_bug)) {
6429                         tg3_tx_recover(tp);
6430                         return;
6431                 }
6432         }
6433
6434         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6435
6436         tnapi->tx_cons = sw_idx;
6437
6438         /* Need to make the tx_cons update visible to tg3_start_xmit()
6439          * before checking for netif_queue_stopped().  Without the
6440          * memory barrier, there is a small possibility that tg3_start_xmit()
6441          * will miss it and cause the queue to be stopped forever.
6442          */
6443         smp_mb();
6444
6445         if (unlikely(netif_tx_queue_stopped(txq) &&
6446                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6447                 __netif_tx_lock(txq, smp_processor_id());
6448                 if (netif_tx_queue_stopped(txq) &&
6449                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6450                         netif_tx_wake_queue(txq);
6451                 __netif_tx_unlock(txq);
6452         }
6453 }
6454
6455 static void tg3_frag_free(bool is_frag, void *data)
6456 {
6457         if (is_frag)
6458                 put_page(virt_to_head_page(data));
6459         else
6460                 kfree(data);
6461 }
6462
6463 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6464 {
6465         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6466                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6467
6468         if (!ri->data)
6469                 return;
6470
6471         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6472                          map_sz, PCI_DMA_FROMDEVICE);
6473         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6474         ri->data = NULL;
6475 }
6476
6477
6478 /* Returns size of skb allocated or < 0 on error.
6479  *
6480  * We only need to fill in the address because the other members
6481  * of the RX descriptor are invariant; see tg3_init_rings.
6482  *
6483  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6484  * posting buffers we only dirty the first cache line of the RX
6485  * descriptor (containing the address).  Whereas for the RX status
6486  * buffers the cpu only reads the last cacheline of the RX descriptor
6487  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6488  */
6489 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6490                              u32 opaque_key, u32 dest_idx_unmasked,
6491                              unsigned int *frag_size)
6492 {
6493         struct tg3_rx_buffer_desc *desc;
6494         struct ring_info *map;
6495         u8 *data;
6496         dma_addr_t mapping;
6497         int skb_size, data_size, dest_idx;
6498
6499         switch (opaque_key) {
6500         case RXD_OPAQUE_RING_STD:
6501                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6502                 desc = &tpr->rx_std[dest_idx];
6503                 map = &tpr->rx_std_buffers[dest_idx];
6504                 data_size = tp->rx_pkt_map_sz;
6505                 break;
6506
6507         case RXD_OPAQUE_RING_JUMBO:
6508                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6509                 desc = &tpr->rx_jmb[dest_idx].std;
6510                 map = &tpr->rx_jmb_buffers[dest_idx];
6511                 data_size = TG3_RX_JMB_MAP_SZ;
6512                 break;
6513
6514         default:
6515                 return -EINVAL;
6516         }
6517
6518         /* Do not overwrite any of the map or rp information
6519          * until we are sure we can commit to a new buffer.
6520          *
6521          * Callers depend upon this behavior and assume that
6522          * we leave everything unchanged if we fail.
6523          */
6524         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6525                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6526         if (skb_size <= PAGE_SIZE) {
6527                 data = netdev_alloc_frag(skb_size);
6528                 *frag_size = skb_size;
6529         } else {
6530                 data = kmalloc(skb_size, GFP_ATOMIC);
6531                 *frag_size = 0;
6532         }
6533         if (!data)
6534                 return -ENOMEM;
6535
6536         mapping = pci_map_single(tp->pdev,
6537                                  data + TG3_RX_OFFSET(tp),
6538                                  data_size,
6539                                  PCI_DMA_FROMDEVICE);
6540         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6541                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6542                 return -EIO;
6543         }
6544
6545         map->data = data;
6546         dma_unmap_addr_set(map, mapping, mapping);
6547
6548         desc->addr_hi = ((u64)mapping >> 32);
6549         desc->addr_lo = ((u64)mapping & 0xffffffff);
6550
6551         return data_size;
6552 }
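
/* Worked example of the size math above (all numbers hypothetical): with
 * data_size = 1536, TG3_RX_OFFSET(tp) = 2 and 64-byte cache lines,
 * SKB_DATA_ALIGN(1538) = 1600; adding the aligned size of struct
 * skb_shared_info (a few hundred bytes) keeps skb_size <= PAGE_SIZE on
 * 4K-page systems, so the cheaper netdev_alloc_frag() path is taken
 * instead of kmalloc().
 */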
6553
6554 /* We only need to move over in the address because the other
6555  * members of the RX descriptor are invariant.  See notes above
6556  * tg3_alloc_rx_data for full details.
6557  */
6558 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6559                            struct tg3_rx_prodring_set *dpr,
6560                            u32 opaque_key, int src_idx,
6561                            u32 dest_idx_unmasked)
6562 {
6563         struct tg3 *tp = tnapi->tp;
6564         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6565         struct ring_info *src_map, *dest_map;
6566         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6567         int dest_idx;
6568
6569         switch (opaque_key) {
6570         case RXD_OPAQUE_RING_STD:
6571                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6572                 dest_desc = &dpr->rx_std[dest_idx];
6573                 dest_map = &dpr->rx_std_buffers[dest_idx];
6574                 src_desc = &spr->rx_std[src_idx];
6575                 src_map = &spr->rx_std_buffers[src_idx];
6576                 break;
6577
6578         case RXD_OPAQUE_RING_JUMBO:
6579                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6580                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6581                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6582                 src_desc = &spr->rx_jmb[src_idx].std;
6583                 src_map = &spr->rx_jmb_buffers[src_idx];
6584                 break;
6585
6586         default:
6587                 return;
6588         }
6589
6590         dest_map->data = src_map->data;
6591         dma_unmap_addr_set(dest_map, mapping,
6592                            dma_unmap_addr(src_map, mapping));
6593         dest_desc->addr_hi = src_desc->addr_hi;
6594         dest_desc->addr_lo = src_desc->addr_lo;
6595
6596         /* Ensure that the update to the skb happens after the physical
6597          * addresses have been transferred to the new BD location.
6598          */
6599         smp_wmb();
6600
6601         src_map->data = NULL;
6602 }
6603
6604 /* The RX ring scheme is composed of multiple rings which post fresh
6605  * buffers to the chip, and one special ring the chip uses to report
6606  * status back to the host.
6607  *
6608  * The special ring reports the status of received packets to the
6609  * host.  The chip does not write into the original descriptor the
6610  * RX buffer was obtained from.  The chip simply takes the original
6611  * descriptor as provided by the host, updates the status and length
6612  * field, then writes this into the next status ring entry.
6613  *
6614  * Each ring the host uses to post buffers to the chip is described
6615  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6616  * it is first placed into on-chip RAM.  Once the packet's length is
6617  * known, the chip walks down the TG3_BDINFO entries to select a ring:
6618  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6619  * whose MAXLEN covers the new packet's length is chosen.
6620  *
6621  * The "separate ring for rx status" scheme may sound odd, but it makes
6622  * sense from a cache coherency perspective.  If only the host writes
6623  * to the buffer post rings, and only the chip writes to the rx status
6624  * rings, then cache lines never move beyond shared-modified state.
6625  * If both the host and chip were to write into the same ring, cache line
6626  * eviction could occur since both entities want it in an exclusive state.
6627  */
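/* Illustration of the MAXLEN walk described above (values hypothetical):
 * with a standard ring MAXLEN of 1536 and a jumbo ring MAXLEN of 9018, a
 * 1000-byte packet selects the standard ring, while a 5000-byte packet
 * falls through to the jumbo ring.
 */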
6628 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6629 {
6630         struct tg3 *tp = tnapi->tp;
6631         u32 work_mask, rx_std_posted = 0;
6632         u32 std_prod_idx, jmb_prod_idx;
6633         u32 sw_idx = tnapi->rx_rcb_ptr;
6634         u16 hw_idx;
6635         int received;
6636         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6637
6638         hw_idx = *(tnapi->rx_rcb_prod_idx);
6639         /*
6640          * We need to order the read of hw_idx and the read of
6641          * the opaque cookie.
6642          */
6643         rmb();
6644         work_mask = 0;
6645         received = 0;
6646         std_prod_idx = tpr->rx_std_prod_idx;
6647         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6648         while (sw_idx != hw_idx && budget > 0) {
6649                 struct ring_info *ri;
6650                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6651                 unsigned int len;
6652                 struct sk_buff *skb;
6653                 dma_addr_t dma_addr;
6654                 u32 opaque_key, desc_idx, *post_ptr;
6655                 u8 *data;
6656                 u64 tstamp = 0;
6657
6658                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6659                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6660                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6661                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6662                         dma_addr = dma_unmap_addr(ri, mapping);
6663                         data = ri->data;
6664                         post_ptr = &std_prod_idx;
6665                         rx_std_posted++;
6666                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6667                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6668                         dma_addr = dma_unmap_addr(ri, mapping);
6669                         data = ri->data;
6670                         post_ptr = &jmb_prod_idx;
6671                 } else
6672                         goto next_pkt_nopost;
6673
6674                 work_mask |= opaque_key;
6675
6676                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6677                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6678                 drop_it:
6679                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6680                                        desc_idx, *post_ptr);
6681                 drop_it_no_recycle:
6682                         /* Other statistics are tracked by the card itself. */
6683                         tp->rx_dropped++;
6684                         goto next_pkt;
6685                 }
6686
6687                 prefetch(data + TG3_RX_OFFSET(tp));
6688                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6689                       ETH_FCS_LEN;
6690
6691                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6692                      RXD_FLAG_PTPSTAT_PTPV1 ||
6693                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6694                      RXD_FLAG_PTPSTAT_PTPV2) {
6695                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6696                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6697                 }
6698
6699                 if (len > TG3_RX_COPY_THRESH(tp)) {
6700                         int skb_size;
6701                         unsigned int frag_size;
6702
6703                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6704                                                     *post_ptr, &frag_size);
6705                         if (skb_size < 0)
6706                                 goto drop_it;
6707
6708                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6709                                          PCI_DMA_FROMDEVICE);
6710
6711                         skb = build_skb(data, frag_size);
6712                         if (!skb) {
6713                                 tg3_frag_free(frag_size != 0, data);
6714                                 goto drop_it_no_recycle;
6715                         }
6716                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6717                         /* Ensure that the update to the data happens
6718                          * after the usage of the old DMA mapping.
6719                          */
6720                         smp_wmb();
6721
6722                         ri->data = NULL;
6723
6724                 } else {
6725                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6726                                        desc_idx, *post_ptr);
6727
6728                         skb = netdev_alloc_skb(tp->dev,
6729                                                len + TG3_RAW_IP_ALIGN);
6730                         if (skb == NULL)
6731                                 goto drop_it_no_recycle;
6732
6733                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6734                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6735                         memcpy(skb->data,
6736                                data + TG3_RX_OFFSET(tp),
6737                                len);
6738                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6739                 }
6740
6741                 skb_put(skb, len);
6742                 if (tstamp)
6743                         tg3_hwclock_to_timestamp(tp, tstamp,
6744                                                  skb_hwtstamps(skb));
6745
6746                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6747                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6748                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6749                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6750                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6751                 else
6752                         skb_checksum_none_assert(skb);
6753
6754                 skb->protocol = eth_type_trans(skb, tp->dev);
6755
6756                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6757                     skb->protocol != htons(ETH_P_8021Q)) {
6758                         dev_kfree_skb(skb);
6759                         goto drop_it_no_recycle;
6760                 }
6761
6762                 if (desc->type_flags & RXD_FLAG_VLAN &&
6763                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6764                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6765                                                desc->err_vlan & RXD_VLAN_MASK);
6766
6767                 napi_gro_receive(&tnapi->napi, skb);
6768
6769                 received++;
6770                 budget--;
6771
6772 next_pkt:
6773                 (*post_ptr)++;
6774
6775                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6776                         tpr->rx_std_prod_idx = std_prod_idx &
6777                                                tp->rx_std_ring_mask;
6778                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6779                                      tpr->rx_std_prod_idx);
6780                         work_mask &= ~RXD_OPAQUE_RING_STD;
6781                         rx_std_posted = 0;
6782                 }
6783 next_pkt_nopost:
6784                 sw_idx++;
6785                 sw_idx &= tp->rx_ret_ring_mask;
6786
6787                 /* Refresh hw_idx to see if there is new work */
6788                 if (sw_idx == hw_idx) {
6789                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6790                         rmb();
6791                 }
6792         }
6793
6794         /* ACK the status ring. */
6795         tnapi->rx_rcb_ptr = sw_idx;
6796         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6797
6798         /* Refill RX ring(s). */
6799         if (!tg3_flag(tp, ENABLE_RSS)) {
6800                 /* Sync BD data before updating mailbox */
6801                 wmb();
6802
6803                 if (work_mask & RXD_OPAQUE_RING_STD) {
6804                         tpr->rx_std_prod_idx = std_prod_idx &
6805                                                tp->rx_std_ring_mask;
6806                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6807                                      tpr->rx_std_prod_idx);
6808                 }
6809                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6810                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6811                                                tp->rx_jmb_ring_mask;
6812                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6813                                      tpr->rx_jmb_prod_idx);
6814                 }
6815                 mmiowb();
6816         } else if (work_mask) {
6817                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6818                  * updated before the producer indices can be updated.
6819                  */
6820                 smp_wmb();
6821
6822                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6823                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6824
6825                 if (tnapi != &tp->napi[1]) {
6826                         tp->rx_refill = true;
6827                         napi_schedule(&tp->napi[1].napi);
6828                 }
6829         }
6830
6831         return received;
6832 }
6833
6834 static void tg3_poll_link(struct tg3 *tp)
6835 {
6836         /* handle link change and other phy events */
6837         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6838                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6839
6840                 if (sblk->status & SD_STATUS_LINK_CHG) {
6841                         sblk->status = SD_STATUS_UPDATED |
6842                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6843                         spin_lock(&tp->lock);
6844                         if (tg3_flag(tp, USE_PHYLIB)) {
6845                                 tw32_f(MAC_STATUS,
6846                                      (MAC_STATUS_SYNC_CHANGED |
6847                                       MAC_STATUS_CFG_CHANGED |
6848                                       MAC_STATUS_MI_COMPLETION |
6849                                       MAC_STATUS_LNKSTATE_CHANGED));
6850                                 udelay(40);
6851                         } else
6852                                 tg3_setup_phy(tp, false);
6853                         spin_unlock(&tp->lock);
6854                 }
6855         }
6856 }
6857
6858 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6859                                 struct tg3_rx_prodring_set *dpr,
6860                                 struct tg3_rx_prodring_set *spr)
6861 {
6862         u32 si, di, cpycnt, src_prod_idx;
6863         int i, err = 0;
6864
6865         while (1) {
6866                 src_prod_idx = spr->rx_std_prod_idx;
6867
6868                 /* Make sure updates to the rx_std_buffers[] entries and the
6869                  * standard producer index are seen in the correct order.
6870                  */
6871                 smp_rmb();
6872
6873                 if (spr->rx_std_cons_idx == src_prod_idx)
6874                         break;
6875
6876                 if (spr->rx_std_cons_idx < src_prod_idx)
6877                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6878                 else
6879                         cpycnt = tp->rx_std_ring_mask + 1 -
6880                                  spr->rx_std_cons_idx;
6881
6882                 cpycnt = min(cpycnt,
6883                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6884
6885                 si = spr->rx_std_cons_idx;
6886                 di = dpr->rx_std_prod_idx;
6887
6888                 for (i = di; i < di + cpycnt; i++) {
6889                         if (dpr->rx_std_buffers[i].data) {
6890                                 cpycnt = i - di;
6891                                 err = -ENOSPC;
6892                                 break;
6893                         }
6894                 }
6895
6896                 if (!cpycnt)
6897                         break;
6898
6899                 /* Ensure that updates to the rx_std_buffers ring and the
6900                  * shadowed hardware producer ring from tg3_recycle_skb() are
6901                  * ordered correctly WRT the skb check above.
6902                  */
6903                 smp_rmb();
6904
6905                 memcpy(&dpr->rx_std_buffers[di],
6906                        &spr->rx_std_buffers[si],
6907                        cpycnt * sizeof(struct ring_info));
6908
6909                 for (i = 0; i < cpycnt; i++, di++, si++) {
6910                         struct tg3_rx_buffer_desc *sbd, *dbd;
6911                         sbd = &spr->rx_std[si];
6912                         dbd = &dpr->rx_std[di];
6913                         dbd->addr_hi = sbd->addr_hi;
6914                         dbd->addr_lo = sbd->addr_lo;
6915                 }
6916
6917                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6918                                        tp->rx_std_ring_mask;
6919                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6920                                        tp->rx_std_ring_mask;
6921         }
6922
6923         while (1) {
6924                 src_prod_idx = spr->rx_jmb_prod_idx;
6925
6926                 /* Make sure updates to the rx_jmb_buffers[] entries and
6927                  * the jumbo producer index are seen in the correct order.
6928                  */
6929                 smp_rmb();
6930
6931                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6932                         break;
6933
6934                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6935                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6936                 else
6937                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6938                                  spr->rx_jmb_cons_idx;
6939
6940                 cpycnt = min(cpycnt,
6941                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6942
6943                 si = spr->rx_jmb_cons_idx;
6944                 di = dpr->rx_jmb_prod_idx;
6945
6946                 for (i = di; i < di + cpycnt; i++) {
6947                         if (dpr->rx_jmb_buffers[i].data) {
6948                                 cpycnt = i - di;
6949                                 err = -ENOSPC;
6950                                 break;
6951                         }
6952                 }
6953
6954                 if (!cpycnt)
6955                         break;
6956
6957                 /* Ensure that updates to the rx_jmb_buffers ring and the
6958                  * shadowed hardware producer ring from tg3_recycle_skb() are
6959                  * ordered correctly WRT the skb check above.
6960                  */
6961                 smp_rmb();
6962
6963                 memcpy(&dpr->rx_jmb_buffers[di],
6964                        &spr->rx_jmb_buffers[si],
6965                        cpycnt * sizeof(struct ring_info));
6966
6967                 for (i = 0; i < cpycnt; i++, di++, si++) {
6968                         struct tg3_rx_buffer_desc *sbd, *dbd;
6969                         sbd = &spr->rx_jmb[si].std;
6970                         dbd = &dpr->rx_jmb[di].std;
6971                         dbd->addr_hi = sbd->addr_hi;
6972                         dbd->addr_lo = sbd->addr_lo;
6973                 }
6974
6975                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6976                                        tp->rx_jmb_ring_mask;
6977                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6978                                        tp->rx_jmb_ring_mask;
6979         }
6980
6981         return err;
6982 }
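
/* Worked example of the cpycnt computation above (illustrative, assuming
 * a ring of rx_std_ring_mask + 1 = 512 entries): with cons_idx = 500 and
 * prod_idx = 10, the producer has wrapped, so the first pass copies
 * 512 - 500 = 12 entries up to the wrap point; the loop then comes
 * around with cons_idx = 0 and copies the remaining 10 entries.
 */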
6983
6984 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6985 {
6986         struct tg3 *tp = tnapi->tp;
6987
6988         /* run TX completion thread */
6989         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6990                 tg3_tx(tnapi);
6991                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6992                         return work_done;
6993         }
6994
6995         if (!tnapi->rx_rcb_prod_idx)
6996                 return work_done;
6997
6998         /* run RX thread, within the bounds set by NAPI.
6999          * All RX "locking" is done by ensuring outside
7000          * code synchronizes with tg3->napi.poll()
7001          */
7002         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7003                 work_done += tg3_rx(tnapi, budget - work_done);
7004
7005         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7006                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7007                 int i, err = 0;
7008                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7009                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7010
7011                 tp->rx_refill = false;
7012                 for (i = 1; i <= tp->rxq_cnt; i++)
7013                         err |= tg3_rx_prodring_xfer(tp, dpr,
7014                                                     &tp->napi[i].prodring);
7015
7016                 wmb();
7017
7018                 if (std_prod_idx != dpr->rx_std_prod_idx)
7019                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7020                                      dpr->rx_std_prod_idx);
7021
7022                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7023                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7024                                      dpr->rx_jmb_prod_idx);
7025
7026                 mmiowb();
7027
7028                 if (err)
7029                         tw32_f(HOSTCC_MODE, tp->coal_now);
7030         }
7031
7032         return work_done;
7033 }
7034
7035 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7036 {
7037         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7038                 schedule_work(&tp->reset_task);
7039 }
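
/* The test_and_set_bit() above is what keeps the reset task single-shot:
 * the first caller to atomically set RESET_TASK_PENDING schedules the
 * work, while concurrent callers observe the bit already set and return
 * without queueing a duplicate.
 */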
7040
7041 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7042 {
7043         cancel_work_sync(&tp->reset_task);
7044         tg3_flag_clear(tp, RESET_TASK_PENDING);
7045         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7046 }
7047
7048 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7049 {
7050         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7051         struct tg3 *tp = tnapi->tp;
7052         int work_done = 0;
7053         struct tg3_hw_status *sblk = tnapi->hw_status;
7054
7055         while (1) {
7056                 work_done = tg3_poll_work(tnapi, work_done, budget);
7057
7058                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7059                         goto tx_recovery;
7060
7061                 if (unlikely(work_done >= budget))
7062                         break;
7063
7064                 /* tp->last_tag is written to the interrupt mailbox
7065                  * below to tell the hw how much work has been processed,
7066                  * so we must read it before checking for more work.
7067                  */
7068                 tnapi->last_tag = sblk->status_tag;
7069                 tnapi->last_irq_tag = tnapi->last_tag;
7070                 rmb();
7071
7072                 /* check for RX/TX work to do */
7073                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7074                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7075
7076                         /* This test is not race-free, but looping again
7077                          * here reduces the number of interrupts taken.
7078                          */
7079                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7080                                 continue;
7081
7082                         napi_complete(napi);
7083                         /* Reenable interrupts. */
7084                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7085
7086                         /* This test is synchronized by napi_schedule()
7087                          * and napi_complete() to close the race window.
7088                          */
7089                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7090                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7091                                                   HOSTCC_MODE_ENABLE |
7092                                                   tnapi->coal_now);
7093                         }
7094                         mmiowb();
7095                         break;
7096                 }
7097         }
7098
7099         return work_done;
7100
7101 tx_recovery:
7102         /* work_done is guaranteed to be less than budget. */
7103         napi_complete(napi);
7104         tg3_reset_task_schedule(tp);
7105         return work_done;
7106 }
7107
7108 static void tg3_process_error(struct tg3 *tp)
7109 {
7110         u32 val;
7111         bool real_error = false;
7112
7113         if (tg3_flag(tp, ERROR_PROCESSED))
7114                 return;
7115
7116         /* Check Flow Attention register */
7117         val = tr32(HOSTCC_FLOW_ATTN);
7118         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7119                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7120                 real_error = true;
7121         }
7122
7123         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7124                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7125                 real_error = true;
7126         }
7127
7128         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7129                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7130                 real_error = true;
7131         }
7132
7133         if (!real_error)
7134                 return;
7135
7136         tg3_dump_state(tp);
7137
7138         tg3_flag_set(tp, ERROR_PROCESSED);
7139         tg3_reset_task_schedule(tp);
7140 }
7141
7142 static int tg3_poll(struct napi_struct *napi, int budget)
7143 {
7144         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7145         struct tg3 *tp = tnapi->tp;
7146         int work_done = 0;
7147         struct tg3_hw_status *sblk = tnapi->hw_status;
7148
7149         while (1) {
7150                 if (sblk->status & SD_STATUS_ERROR)
7151                         tg3_process_error(tp);
7152
7153                 tg3_poll_link(tp);
7154
7155                 work_done = tg3_poll_work(tnapi, work_done, budget);
7156
7157                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7158                         goto tx_recovery;
7159
7160                 if (unlikely(work_done >= budget))
7161                         break;
7162
7163                 if (tg3_flag(tp, TAGGED_STATUS)) {
7164                         /* tp->last_tag is used in tg3_int_reenable() below
7165                          * to tell the hw how much work has been processed,
7166                          * so we must read it before checking for more work.
7167                          */
7168                         tnapi->last_tag = sblk->status_tag;
7169                         tnapi->last_irq_tag = tnapi->last_tag;
7170                         rmb();
7171                 } else
7172                         sblk->status &= ~SD_STATUS_UPDATED;
7173
7174                 if (likely(!tg3_has_work(tnapi))) {
7175                         napi_complete(napi);
7176                         tg3_int_reenable(tnapi);
7177                         break;
7178                 }
7179         }
7180
7181         return work_done;
7182
7183 tx_recovery:
7184         /* work_done is guaranteed to be less than budget. */
7185         napi_complete(napi);
7186         tg3_reset_task_schedule(tp);
7187         return work_done;
7188 }
7189
7190 static void tg3_napi_disable(struct tg3 *tp)
7191 {
7192         int i;
7193
7194         for (i = tp->irq_cnt - 1; i >= 0; i--)
7195                 napi_disable(&tp->napi[i].napi);
7196 }
7197
7198 static void tg3_napi_enable(struct tg3 *tp)
7199 {
7200         int i;
7201
7202         for (i = 0; i < tp->irq_cnt; i++)
7203                 napi_enable(&tp->napi[i].napi);
7204 }
7205
7206 static void tg3_napi_init(struct tg3 *tp)
7207 {
7208         int i;
7209
7210         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7211         for (i = 1; i < tp->irq_cnt; i++)
7212                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7213 }
7214
7215 static void tg3_napi_fini(struct tg3 *tp)
7216 {
7217         int i;
7218
7219         for (i = 0; i < tp->irq_cnt; i++)
7220                 netif_napi_del(&tp->napi[i].napi);
7221 }
7222
7223 static inline void tg3_netif_stop(struct tg3 *tp)
7224 {
7225         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7226         tg3_napi_disable(tp);
7227         netif_carrier_off(tp->dev);
7228         netif_tx_disable(tp->dev);
7229 }
7230
7231 /* tp->lock must be held */
7232 static inline void tg3_netif_start(struct tg3 *tp)
7233 {
7234         tg3_ptp_resume(tp);
7235
7236         /* NOTE: unconditional netif_tx_wake_all_queues is only
7237          * appropriate so long as all callers are assured to
7238          * have free tx slots (such as after tg3_init_hw)
7239          */
7240         netif_tx_wake_all_queues(tp->dev);
7241
7242         if (tp->link_up)
7243                 netif_carrier_on(tp->dev);
7244
7245         tg3_napi_enable(tp);
7246         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7247         tg3_enable_ints(tp);
7248 }
7249
7250 static void tg3_irq_quiesce(struct tg3 *tp)
7251 {
7252         int i;
7253
7254         BUG_ON(tp->irq_sync);
7255
7256         tp->irq_sync = 1;
7257         smp_mb();
7258
7259         for (i = 0; i < tp->irq_cnt; i++)
7260                 synchronize_irq(tp->napi[i].irq_vec);
7261 }
7262
7263 /* Fully shut down all tg3 driver activity elsewhere in the system.
7264  * If irq_sync is non-zero, then the IRQ handlers must be quiesced
7265  * as well.  Most of the time, this is not necessary except when
7266  * shutting down the device.
7267  */
7268 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7269 {
7270         spin_lock_bh(&tp->lock);
7271         if (irq_sync)
7272                 tg3_irq_quiesce(tp);
7273 }
7274
7275 static inline void tg3_full_unlock(struct tg3 *tp)
7276 {
7277         spin_unlock_bh(&tp->lock);
7278 }
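/* Illustrative call pattern (a sketch, not a real call site): callers
 * doing a full reconfigure pass irq_sync = 1 so in-flight IRQ handlers
 * are also quiesced:
 *
 *      tg3_full_lock(tp, 1);
 *      ...reprogram the hardware...
 *      tg3_full_unlock(tp);
 */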
7279
7280 /* One-shot MSI handler - Chip automatically disables interrupt
7281  * after sending MSI so driver doesn't have to do it.
7282  */
7283 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7284 {
7285         struct tg3_napi *tnapi = dev_id;
7286         struct tg3 *tp = tnapi->tp;
7287
7288         prefetch(tnapi->hw_status);
7289         if (tnapi->rx_rcb)
7290                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7291
7292         if (likely(!tg3_irq_sync(tp)))
7293                 napi_schedule(&tnapi->napi);
7294
7295         return IRQ_HANDLED;
7296 }
7297
7298 /* MSI ISR - No need to check for interrupt sharing and no need to
7299  * flush status block and interrupt mailbox. PCI ordering rules
7300  * guarantee that MSI will arrive after the status block.
7301  */
7302 static irqreturn_t tg3_msi(int irq, void *dev_id)
7303 {
7304         struct tg3_napi *tnapi = dev_id;
7305         struct tg3 *tp = tnapi->tp;
7306
7307         prefetch(tnapi->hw_status);
7308         if (tnapi->rx_rcb)
7309                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7310         /*
7311          * Writing any value to intr-mbox-0 clears PCI INTA# and
7312          * chip-internal interrupt pending events.
7313          * Writing non-zero to intr-mbox-0 additionally tells the
7314          * NIC to stop sending us irqs, engaging "in-intr-handler"
7315          * event coalescing.
7316          */
7317         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7318         if (likely(!tg3_irq_sync(tp)))
7319                 napi_schedule(&tnapi->napi);
7320
7321         return IRQ_RETVAL(1);
7322 }
7323
7324 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7325 {
7326         struct tg3_napi *tnapi = dev_id;
7327         struct tg3 *tp = tnapi->tp;
7328         struct tg3_hw_status *sblk = tnapi->hw_status;
7329         unsigned int handled = 1;
7330
7331         /* In INTx mode, the interrupt can arrive at the CPU before
7332          * the status block posted just prior to it is visible.
7333          * Reading the PCI State register will confirm whether the
7334          * interrupt is ours and will flush the status block.
7335          */
7336         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7337                 if (tg3_flag(tp, CHIP_RESETTING) ||
7338                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7339                         handled = 0;
7340                         goto out;
7341                 }
7342         }
7343
7344         /*
7345          * Writing any value to intr-mbox-0 clears PCI INTA# and
7346          * chip-internal interrupt pending events.
7347          * Writing non-zero to intr-mbox-0 additionally tells the
7348          * NIC to stop sending us irqs, engaging "in-intr-handler"
7349          * event coalescing.
7350          *
7351          * Flush the mailbox to de-assert the IRQ immediately to prevent
7352          * spurious interrupts.  The flush impacts performance but
7353          * excessive spurious interrupts can be worse in some cases.
7354          */
7355         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7356         if (tg3_irq_sync(tp))
7357                 goto out;
7358         sblk->status &= ~SD_STATUS_UPDATED;
7359         if (likely(tg3_has_work(tnapi))) {
7360                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7361                 napi_schedule(&tnapi->napi);
7362         } else {
7363                 /* No work, shared interrupt perhaps?  re-enable
7364                  * interrupts, and flush that PCI write
7365                  */
7366                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7367                                0x00000000);
7368         }
7369 out:
7370         return IRQ_RETVAL(handled);
7371 }
7372
7373 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7374 {
7375         struct tg3_napi *tnapi = dev_id;
7376         struct tg3 *tp = tnapi->tp;
7377         struct tg3_hw_status *sblk = tnapi->hw_status;
7378         unsigned int handled = 1;
7379
7380         /* In INTx mode, the interrupt can arrive at the CPU before
7381          * the status block posted just prior to it is visible.
7382          * Reading the PCI State register will confirm whether the
7383          * interrupt is ours and will flush the status block.
7384          */
7385         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7386                 if (tg3_flag(tp, CHIP_RESETTING) ||
7387                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7388                         handled = 0;
7389                         goto out;
7390                 }
7391         }
7392
7393         /*
7394          * Writing any value to intr-mbox-0 clears PCI INTA# and
7395          * chip-internal interrupt pending events.
7396          * Writing non-zero to intr-mbox-0 additionally tells the
7397          * NIC to stop sending us irqs, engaging "in-intr-handler"
7398          * event coalescing.
7399          *
7400          * Flush the mailbox to de-assert the IRQ immediately to prevent
7401          * spurious interrupts.  The flush impacts performance but
7402          * excessive spurious interrupts can be worse in some cases.
7403          */
7404         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7405
7406         /*
7407          * In a shared interrupt configuration, sometimes other devices'
7408          * interrupts will scream.  We record the current status tag here
7409          * so that the above check can report that the screaming interrupts
7410          * are unhandled.  Eventually they will be silenced.
7411          */
7412         tnapi->last_irq_tag = sblk->status_tag;
7413
7414         if (tg3_irq_sync(tp))
7415                 goto out;
7416
7417         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7418
7419         napi_schedule(&tnapi->napi);
7420
7421 out:
7422         return IRQ_RETVAL(handled);
7423 }
7424
7425 /* ISR for interrupt test */
7426 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7427 {
7428         struct tg3_napi *tnapi = dev_id;
7429         struct tg3 *tp = tnapi->tp;
7430         struct tg3_hw_status *sblk = tnapi->hw_status;
7431
7432         if ((sblk->status & SD_STATUS_UPDATED) ||
7433             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7434                 tg3_disable_ints(tp);
7435                 return IRQ_RETVAL(1);
7436         }
7437         return IRQ_RETVAL(0);
7438 }
7439
7440 #ifdef CONFIG_NET_POLL_CONTROLLER
7441 static void tg3_poll_controller(struct net_device *dev)
7442 {
7443         int i;
7444         struct tg3 *tp = netdev_priv(dev);
7445
7446         if (tg3_irq_sync(tp))
7447                 return;
7448
7449         for (i = 0; i < tp->irq_cnt; i++)
7450                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7451 }
7452 #endif
7453
7454 static void tg3_tx_timeout(struct net_device *dev)
7455 {
7456         struct tg3 *tp = netdev_priv(dev);
7457
7458         if (netif_msg_tx_err(tp)) {
7459                 netdev_err(dev, "transmit timed out, resetting\n");
7460                 tg3_dump_state(tp);
7461         }
7462
7463         tg3_reset_task_schedule(tp);
7464 }
7465
7466 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7467 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7468 {
7469         u32 base = (u32) mapping & 0xffffffff;
7470
7471         return (base > 0xffffdcc0) && (base + len + 8 < base);
7472 }
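/* Worked example (assumed values): a 100-byte buffer mapped at
 * 0xffffffc0 straddles the first 4GB boundary.  base = 0xffffffc0
 * exceeds 0xffffdcc0, and base + 100 + 8 wraps to 0x2c < base, so the
 * test fires and the buffer gets the workaround treatment.
 */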
7473
7474 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7475  * of any 4GB boundaries: 4G, 8G, etc
7476  */
7477 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7478                                            u32 len, u32 mss)
7479 {
7480         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7481                 u32 base = (u32) mapping & 0xffffffff;
7482
7483                 return ((base + len + (mss & 0x3fff)) < base);
7484         }
7485         return 0;
7486 }
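/* Worked example (assumed values): on a 5762 with mss = 1460, a
 * 1536-byte fragment mapped at 0xfffff800 ends at 0xfffffe00, i.e.
 * within mss bytes of the 4GB boundary; base + len + mss wraps to
 * 0x3b4 < base, so the test fires.
 */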
7487
7488 /* Test for DMA addresses > 40-bit */
7489 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7490                                           int len)
7491 {
7492 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7493         if (tg3_flag(tp, 40BIT_DMA_BUG))
7494                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7495         return 0;
7496 #else
7497         return 0;
7498 #endif
7499 }
7500
7501 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7502                                  dma_addr_t mapping, u32 len, u32 flags,
7503                                  u32 mss, u32 vlan)
7504 {
7505         txbd->addr_hi = ((u64) mapping >> 32);
7506         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7507         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7508         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7509 }
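/* Illustrative BD encoding (assumed values): mapping 0x123456780,
 * len 1514 and flags/mss/vlan all zero yields addr_hi = 0x1,
 * addr_lo = 0x23456780, len_flags = 1514 << TXD_LEN_SHIFT and
 * vlan_tag = 0.
 */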
7510
7511 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7512                             dma_addr_t map, u32 len, u32 flags,
7513                             u32 mss, u32 vlan)
7514 {
7515         struct tg3 *tp = tnapi->tp;
7516         bool hwbug = false;
7517
7518         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7519                 hwbug = true;
7520
7521         if (tg3_4g_overflow_test(map, len))
7522                 hwbug = true;
7523
7524         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7525                 hwbug = true;
7526
7527         if (tg3_40bit_overflow_test(tp, map, len))
7528                 hwbug = true;
7529
7530         if (tp->dma_limit) {
7531                 u32 prvidx = *entry;
7532                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7533                 while (len > tp->dma_limit && *budget) {
7534                         u32 frag_len = tp->dma_limit;
7535                         len -= tp->dma_limit;
7536
7537                         /* Avoid the 8-byte DMA problem */
7538                         if (len <= 8) {
7539                                 len += tp->dma_limit / 2;
7540                                 frag_len = tp->dma_limit / 2;
7541                         }
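                        /* Illustrative trace (assumed values): with
                         * dma_limit = 4096 and len = 4100, a full
                         * 4096-byte fragment would leave a 4-byte
                         * remainder and re-trigger the 8-byte bug, so
                         * the loop emits 2048 bytes here, leaving 2052
                         * for the final BD.
                         */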
7542
7543                         tnapi->tx_buffers[*entry].fragmented = true;
7544
7545                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7546                                       frag_len, tmp_flag, mss, vlan);
7547                         *budget -= 1;
7548                         prvidx = *entry;
7549                         *entry = NEXT_TX(*entry);
7550
7551                         map += frag_len;
7552                 }
7553
7554                 if (len) {
7555                         if (*budget) {
7556                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7557                                               len, flags, mss, vlan);
7558                                 *budget -= 1;
7559                                 *entry = NEXT_TX(*entry);
7560                         } else {
7561                                 hwbug = true;
7562                                 tnapi->tx_buffers[prvidx].fragmented = false;
7563                         }
7564                 }
7565         } else {
7566                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7567                               len, flags, mss, vlan);
7568                 *entry = NEXT_TX(*entry);
7569         }
7570
7571         return hwbug;
7572 }
7573
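/* Unmap the head mapping and the fragment BDs of one tx packet;
 * tg3_free_rings() below passes last = nr_frags - 1 to release an
 * entire queued skb.
 */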
7574 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7575 {
7576         int i;
7577         struct sk_buff *skb;
7578         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7579
7580         skb = txb->skb;
7581         txb->skb = NULL;
7582
7583         pci_unmap_single(tnapi->tp->pdev,
7584                          dma_unmap_addr(txb, mapping),
7585                          skb_headlen(skb),
7586                          PCI_DMA_TODEVICE);
7587
7588         while (txb->fragmented) {
7589                 txb->fragmented = false;
7590                 entry = NEXT_TX(entry);
7591                 txb = &tnapi->tx_buffers[entry];
7592         }
7593
7594         for (i = 0; i <= last; i++) {
7595                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7596
7597                 entry = NEXT_TX(entry);
7598                 txb = &tnapi->tx_buffers[entry];
7599
7600                 pci_unmap_page(tnapi->tp->pdev,
7601                                dma_unmap_addr(txb, mapping),
7602                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7603
7604                 while (txb->fragmented) {
7605                         txb->fragmented = false;
7606                         entry = NEXT_TX(entry);
7607                         txb = &tnapi->tx_buffers[entry];
7608                 }
7609         }
7610 }
7611
7612 /* Work around 4GB and 40-bit hardware DMA bugs. */
7613 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7614                                        struct sk_buff **pskb,
7615                                        u32 *entry, u32 *budget,
7616                                        u32 base_flags, u32 mss, u32 vlan)
7617 {
7618         struct tg3 *tp = tnapi->tp;
7619         struct sk_buff *new_skb, *skb = *pskb;
7620         dma_addr_t new_addr = 0;
7621         int ret = 0;
7622
7623         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7624                 new_skb = skb_copy(skb, GFP_ATOMIC);
7625         else {
7626                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7627
7628                 new_skb = skb_copy_expand(skb,
7629                                           skb_headroom(skb) + more_headroom,
7630                                           skb_tailroom(skb), GFP_ATOMIC);
7631         }
7632
7633         if (!new_skb) {
7634                 ret = -1;
7635         } else {
7636                 /* New SKB is guaranteed to be linear. */
7637                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7638                                           PCI_DMA_TODEVICE);
7639                 /* Make sure the mapping succeeded */
7640                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7641                         dev_kfree_skb(new_skb);
7642                         ret = -1;
7643                 } else {
7644                         u32 save_entry = *entry;
7645
7646                         base_flags |= TXD_FLAG_END;
7647
7648                         tnapi->tx_buffers[*entry].skb = new_skb;
7649                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7650                                            mapping, new_addr);
7651
7652                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7653                                             new_skb->len, base_flags,
7654                                             mss, vlan)) {
7655                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7656                                 dev_kfree_skb(new_skb);
7657                                 ret = -1;
7658                         }
7659                 }
7660         }
7661
7662         dev_kfree_skb(skb);
7663         *pskb = new_skb;
7664         return ret;
7665 }
7666
7667 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7668
7669 /* Use GSO to work around a rare TSO bug that may be triggered when the
7670  * TSO header is greater than 80 bytes.
7671  */
7672 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7673 {
7674         struct sk_buff *segs, *nskb;
7675         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
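        /* The x3 multiplier is a worst-case guess (an assumption, not
         * from hw docs): each emitted segment may need one descriptor
         * for its headers and up to two for payload that spans a page
         * boundary.
         */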
7676
7677         /* Estimate the number of fragments in the worst case */
7678         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7679                 netif_stop_queue(tp->dev);
7680
7681                 /* netif_tx_stop_queue() must be done before checking
7682                  * the tx index in tg3_tx_avail() below, because in
7683                  * tg3_tx(), we update tx index before checking for
7684                  * netif_tx_queue_stopped().
7685                  */
7686                 smp_mb();
7687                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7688                         return NETDEV_TX_BUSY;
7689
7690                 netif_wake_queue(tp->dev);
7691         }
7692
7693         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7694         if (IS_ERR(segs))
7695                 goto tg3_tso_bug_end;
7696
7697         do {
7698                 nskb = segs;
7699                 segs = segs->next;
7700                 nskb->next = NULL;
7701                 tg3_start_xmit(nskb, tp->dev);
7702         } while (segs);
7703
7704 tg3_tso_bug_end:
7705         dev_kfree_skb(skb);
7706
7707         return NETDEV_TX_OK;
7708 }
7709
7710 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7711  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7712  */
7713 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7714 {
7715         struct tg3 *tp = netdev_priv(dev);
7716         u32 len, entry, base_flags, mss, vlan = 0;
7717         u32 budget;
7718         int i = -1, would_hit_hwbug;
7719         dma_addr_t mapping;
7720         struct tg3_napi *tnapi;
7721         struct netdev_queue *txq;
7722         unsigned int last;
7723
7724         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7725         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7726         if (tg3_flag(tp, ENABLE_TSS))
7727                 tnapi++;
7728
7729         budget = tg3_tx_avail(tnapi);
7730
7731         /* We are running in BH disabled context with netif_tx_lock
7732          * and TX reclaim runs via tp->napi.poll inside of a software
7733          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7734          * no IRQ context deadlocks to worry about either.  Rejoice!
7735          */
7736         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7737                 if (!netif_tx_queue_stopped(txq)) {
7738                         netif_tx_stop_queue(txq);
7739
7740                         /* This is a hard error, log it. */
7741                         netdev_err(dev,
7742                                    "BUG! Tx Ring full when queue awake!\n");
7743                 }
7744                 return NETDEV_TX_BUSY;
7745         }
7746
7747         entry = tnapi->tx_prod;
7748         base_flags = 0;
7749         if (skb->ip_summed == CHECKSUM_PARTIAL)
7750                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7751
7752         mss = skb_shinfo(skb)->gso_size;
7753         if (mss) {
7754                 struct iphdr *iph;
7755                 u32 tcp_opt_len, hdr_len;
7756
7757                 if (skb_header_cloned(skb) &&
7758                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7759                         goto drop;
7760
7761                 iph = ip_hdr(skb);
7762                 tcp_opt_len = tcp_optlen(skb);
7763
7764                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7765
7766                 if (!skb_is_gso_v6(skb)) {
7767                         iph->check = 0;
7768                         iph->tot_len = htons(mss + hdr_len);
7769                 }
7770
7771                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7772                     tg3_flag(tp, TSO_BUG))
7773                         return tg3_tso_bug(tp, skb);
7774
7775                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7776                                TXD_FLAG_CPU_POST_DMA);
7777
7778                 if (tg3_flag(tp, HW_TSO_1) ||
7779                     tg3_flag(tp, HW_TSO_2) ||
7780                     tg3_flag(tp, HW_TSO_3)) {
7781                         tcp_hdr(skb)->check = 0;
7782                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7783                 } else
7784                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7785                                                                  iph->daddr, 0,
7786                                                                  IPPROTO_TCP,
7787                                                                  0);
7788
7789                 if (tg3_flag(tp, HW_TSO_3)) {
7790                         mss |= (hdr_len & 0xc) << 12;
7791                         if (hdr_len & 0x10)
7792                                 base_flags |= 0x00000010;
7793                         base_flags |= (hdr_len & 0x3e0) << 5;
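                        /* e.g. (illustrative) hdr_len = 40 (0x28) for
                         * plain IPv4 + TCP: bits 2-3 (0x8) land in mss
                         * bits 14-15, bit 4 is clear, and bits 5-9
                         * (0x20) shift into base_flags bits 10-14.
                         */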
7794                 } else if (tg3_flag(tp, HW_TSO_2))
7795                         mss |= hdr_len << 9;
7796                 else if (tg3_flag(tp, HW_TSO_1) ||
7797                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7798                         if (tcp_opt_len || iph->ihl > 5) {
7799                                 int tsflags;
7800
7801                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7802                                 mss |= (tsflags << 11);
7803                         }
7804                 } else {
7805                         if (tcp_opt_len || iph->ihl > 5) {
7806                                 int tsflags;
7807
7808                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7809                                 base_flags |= tsflags << 12;
7810                         }
7811                 }
7812         }
7813
7814         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7815             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7816                 base_flags |= TXD_FLAG_JMB_PKT;
7817
7818         if (vlan_tx_tag_present(skb)) {
7819                 base_flags |= TXD_FLAG_VLAN;
7820                 vlan = vlan_tx_tag_get(skb);
7821         }
7822
7823         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7824             tg3_flag(tp, TX_TSTAMP_EN)) {
7825                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7826                 base_flags |= TXD_FLAG_HWTSTAMP;
7827         }
7828
7829         len = skb_headlen(skb);
7830
7831         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7832         if (pci_dma_mapping_error(tp->pdev, mapping))
7833                 goto drop;
7834
7835
7836         tnapi->tx_buffers[entry].skb = skb;
7837         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7838
7839         would_hit_hwbug = 0;
7840
7841         if (tg3_flag(tp, 5701_DMA_BUG))
7842                 would_hit_hwbug = 1;
7843
7844         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7845                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7846                             mss, vlan)) {
7847                 would_hit_hwbug = 1;
7848         } else if (skb_shinfo(skb)->nr_frags > 0) {
7849                 u32 tmp_mss = mss;
7850
7851                 if (!tg3_flag(tp, HW_TSO_1) &&
7852                     !tg3_flag(tp, HW_TSO_2) &&
7853                     !tg3_flag(tp, HW_TSO_3))
7854                         tmp_mss = 0;
7855
7856                 /* Now loop through additional data
7857                  * fragments, and queue them.
7858                  */
7859                 last = skb_shinfo(skb)->nr_frags - 1;
7860                 for (i = 0; i <= last; i++) {
7861                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7862
7863                         len = skb_frag_size(frag);
7864                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7865                                                    len, DMA_TO_DEVICE);
7866
7867                         tnapi->tx_buffers[entry].skb = NULL;
7868                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7869                                            mapping);
7870                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7871                                 goto dma_error;
7872
7873                         if (!budget ||
7874                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7875                                             len, base_flags |
7876                                             ((i == last) ? TXD_FLAG_END : 0),
7877                                             tmp_mss, vlan)) {
7878                                 would_hit_hwbug = 1;
7879                                 break;
7880                         }
7881                 }
7882         }
7883
7884         if (would_hit_hwbug) {
7885                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7886
7887                 /* If the workaround fails due to memory/mapping
7888                  * failure, silently drop this packet.
7889                  */
7890                 entry = tnapi->tx_prod;
7891                 budget = tg3_tx_avail(tnapi);
7892                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7893                                                 base_flags, mss, vlan))
7894                         goto drop_nofree;
7895         }
7896
7897         skb_tx_timestamp(skb);
7898         netdev_tx_sent_queue(txq, skb->len);
7899
7900         /* Sync BD data before updating mailbox */
7901         wmb();
7902
7903         /* Packets are ready, update Tx producer idx local and on card. */
7904         tw32_tx_mbox(tnapi->prodmbox, entry);
7905
7906         tnapi->tx_prod = entry;
7907         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7908                 netif_tx_stop_queue(txq);
7909
7910                 /* netif_tx_stop_queue() must be done before checking
7911                  * the tx index in tg3_tx_avail() below, because in
7912                  * tg3_tx(), we update tx index before checking for
7913                  * netif_tx_queue_stopped().
7914                  */
7915                 smp_mb();
7916                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7917                         netif_tx_wake_queue(txq);
7918         }
7919
7920         mmiowb();
7921         return NETDEV_TX_OK;
7922
7923 dma_error:
7924         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7925         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7926 drop:
7927         dev_kfree_skb(skb);
7928 drop_nofree:
7929         tp->tx_dropped++;
7930         return NETDEV_TX_OK;
7931 }
7932
7933 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7934 {
7935         if (enable) {
7936                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7937                                   MAC_MODE_PORT_MODE_MASK);
7938
7939                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7940
7941                 if (!tg3_flag(tp, 5705_PLUS))
7942                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7943
7944                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7945                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7946                 else
7947                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7948         } else {
7949                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7950
7951                 if (tg3_flag(tp, 5705_PLUS) ||
7952                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7953                     tg3_asic_rev(tp) == ASIC_REV_5700)
7954                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7955         }
7956
7957         tw32(MAC_MODE, tp->mac_mode);
7958         udelay(40);
7959 }
7960
7961 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7962 {
7963         u32 val, bmcr, mac_mode, ptest = 0;
7964
7965         tg3_phy_toggle_apd(tp, false);
7966         tg3_phy_toggle_automdix(tp, false);
7967
7968         if (extlpbk && tg3_phy_set_extloopbk(tp))
7969                 return -EIO;
7970
7971         bmcr = BMCR_FULLDPLX;
7972         switch (speed) {
7973         case SPEED_10:
7974                 break;
7975         case SPEED_100:
7976                 bmcr |= BMCR_SPEED100;
7977                 break;
7978         case SPEED_1000:
7979         default:
7980                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7981                         speed = SPEED_100;
7982                         bmcr |= BMCR_SPEED100;
7983                 } else {
7984                         speed = SPEED_1000;
7985                         bmcr |= BMCR_SPEED1000;
7986                 }
7987         }
7988
7989         if (extlpbk) {
7990                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7991                         tg3_readphy(tp, MII_CTRL1000, &val);
7992                         val |= CTL1000_AS_MASTER |
7993                                CTL1000_ENABLE_MASTER;
7994                         tg3_writephy(tp, MII_CTRL1000, val);
7995                 } else {
7996                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7997                                 MII_TG3_FET_PTEST_TRIM_2;
7998                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7999                 }
8000         } else
8001                 bmcr |= BMCR_LOOPBACK;
8002
8003         tg3_writephy(tp, MII_BMCR, bmcr);
8004
8005         /* The write needs to be flushed for the FETs */
8006         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8007                 tg3_readphy(tp, MII_BMCR, &bmcr);
8008
8009         udelay(40);
8010
8011         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8012             tg3_asic_rev(tp) == ASIC_REV_5785) {
8013                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8014                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8015                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8016
8017                 /* The write needs to be flushed for the AC131 */
8018                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8019         }
8020
8021         /* Reset to prevent losing 1st rx packet intermittently */
8022         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8023             tg3_flag(tp, 5780_CLASS)) {
8024                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8025                 udelay(10);
8026                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8027         }
8028
8029         mac_mode = tp->mac_mode &
8030                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8031         if (speed == SPEED_1000)
8032                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8033         else
8034                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8035
8036         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8037                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8038
8039                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8040                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8041                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8042                         mac_mode |= MAC_MODE_LINK_POLARITY;
8043
8044                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8045                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8046         }
8047
8048         tw32(MAC_MODE, mac_mode);
8049         udelay(40);
8050
8051         return 0;
8052 }
8053
8054 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8055 {
8056         struct tg3 *tp = netdev_priv(dev);
8057
8058         if (features & NETIF_F_LOOPBACK) {
8059                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8060                         return;
8061
8062                 spin_lock_bh(&tp->lock);
8063                 tg3_mac_loopback(tp, true);
8064                 netif_carrier_on(tp->dev);
8065                 spin_unlock_bh(&tp->lock);
8066                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8067         } else {
8068                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8069                         return;
8070
8071                 spin_lock_bh(&tp->lock);
8072                 tg3_mac_loopback(tp, false);
8073                 /* Force link status check */
8074                 tg3_setup_phy(tp, true);
8075                 spin_unlock_bh(&tp->lock);
8076                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8077         }
8078 }
8079
8080 static netdev_features_t tg3_fix_features(struct net_device *dev,
8081         netdev_features_t features)
8082 {
8083         struct tg3 *tp = netdev_priv(dev);
8084
8085         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8086                 features &= ~NETIF_F_ALL_TSO;
8087
8088         return features;
8089 }
8090
8091 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8092 {
8093         netdev_features_t changed = dev->features ^ features;
8094
8095         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8096                 tg3_set_loopback(dev, features);
8097
8098         return 0;
8099 }
8100
8101 static void tg3_rx_prodring_free(struct tg3 *tp,
8102                                  struct tg3_rx_prodring_set *tpr)
8103 {
8104         int i;
8105
8106         if (tpr != &tp->napi[0].prodring) {
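                /* Ring sizes are powers of two, so advancing with
                 * "(i + 1) & mask" below wraps the consumer index
                 * around the ring.
                 */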
8107                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8108                      i = (i + 1) & tp->rx_std_ring_mask)
8109                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8110                                         tp->rx_pkt_map_sz);
8111
8112                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8113                         for (i = tpr->rx_jmb_cons_idx;
8114                              i != tpr->rx_jmb_prod_idx;
8115                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8116                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8117                                                 TG3_RX_JMB_MAP_SZ);
8118                         }
8119                 }
8120
8121                 return;
8122         }
8123
8124         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8125                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8126                                 tp->rx_pkt_map_sz);
8127
8128         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8129                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8130                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8131                                         TG3_RX_JMB_MAP_SZ);
8132         }
8133 }
8134
8135 /* Initialize rx rings for packet processing.
8136  *
8137  * The chip has been shut down and the driver detached from
8138  * the network stack, so no interrupts or new tx packets will
8139  * end up in the driver.  tp->{tx,}lock are held and thus
8140  * we may not sleep.
8141  */
8142 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8143                                  struct tg3_rx_prodring_set *tpr)
8144 {
8145         u32 i, rx_pkt_dma_sz;
8146
8147         tpr->rx_std_cons_idx = 0;
8148         tpr->rx_std_prod_idx = 0;
8149         tpr->rx_jmb_cons_idx = 0;
8150         tpr->rx_jmb_prod_idx = 0;
8151
8152         if (tpr != &tp->napi[0].prodring) {
8153                 memset(&tpr->rx_std_buffers[0], 0,
8154                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8155                 if (tpr->rx_jmb_buffers)
8156                         memset(&tpr->rx_jmb_buffers[0], 0,
8157                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8158                 goto done;
8159         }
8160
8161         /* Zero out all descriptors. */
8162         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8163
8164         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8165         if (tg3_flag(tp, 5780_CLASS) &&
8166             tp->dev->mtu > ETH_DATA_LEN)
8167                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8168         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8169
8170         /* Initialize invariants of the rings; we only set this
8171          * stuff once.  This works because the card does not
8172          * write into the rx buffer posting rings.
8173          */
8174         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8175                 struct tg3_rx_buffer_desc *rxd;
8176
8177                 rxd = &tpr->rx_std[i];
8178                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8179                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8180                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8181                                (i << RXD_OPAQUE_INDEX_SHIFT));
8182         }
8183
8184         /* Now allocate fresh SKBs for each rx ring. */
8185         for (i = 0; i < tp->rx_pending; i++) {
8186                 unsigned int frag_size;
8187
8188                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8189                                       &frag_size) < 0) {
8190                         netdev_warn(tp->dev,
8191                                     "Using a smaller RX standard ring. Only "
8192                                     "%d out of %d buffers were allocated "
8193                                     "successfully\n", i, tp->rx_pending);
8194                         if (i == 0)
8195                                 goto initfail;
8196                         tp->rx_pending = i;
8197                         break;
8198                 }
8199         }
8200
8201         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8202                 goto done;
8203
8204         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8205
8206         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8207                 goto done;
8208
8209         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8210                 struct tg3_rx_buffer_desc *rxd;
8211
8212                 rxd = &tpr->rx_jmb[i].std;
8213                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8214                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8215                                   RXD_FLAG_JUMBO;
8216                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8217                        (i << RXD_OPAQUE_INDEX_SHIFT));
8218         }
8219
8220         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8221                 unsigned int frag_size;
8222
8223                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8224                                       &frag_size) < 0) {
8225                         netdev_warn(tp->dev,
8226                                     "Using a smaller RX jumbo ring. Only %d "
8227                                     "out of %d buffers were allocated "
8228                                     "successfully\n", i, tp->rx_jumbo_pending);
8229                         if (i == 0)
8230                                 goto initfail;
8231                         tp->rx_jumbo_pending = i;
8232                         break;
8233                 }
8234         }
8235
8236 done:
8237         return 0;
8238
8239 initfail:
8240         tg3_rx_prodring_free(tp, tpr);
8241         return -ENOMEM;
8242 }
8243
8244 static void tg3_rx_prodring_fini(struct tg3 *tp,
8245                                  struct tg3_rx_prodring_set *tpr)
8246 {
8247         kfree(tpr->rx_std_buffers);
8248         tpr->rx_std_buffers = NULL;
8249         kfree(tpr->rx_jmb_buffers);
8250         tpr->rx_jmb_buffers = NULL;
8251         if (tpr->rx_std) {
8252                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8253                                   tpr->rx_std, tpr->rx_std_mapping);
8254                 tpr->rx_std = NULL;
8255         }
8256         if (tpr->rx_jmb) {
8257                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8258                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8259                 tpr->rx_jmb = NULL;
8260         }
8261 }
8262
8263 static int tg3_rx_prodring_init(struct tg3 *tp,
8264                                 struct tg3_rx_prodring_set *tpr)
8265 {
8266         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8267                                       GFP_KERNEL);
8268         if (!tpr->rx_std_buffers)
8269                 return -ENOMEM;
8270
8271         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8272                                          TG3_RX_STD_RING_BYTES(tp),
8273                                          &tpr->rx_std_mapping,
8274                                          GFP_KERNEL);
8275         if (!tpr->rx_std)
8276                 goto err_out;
8277
8278         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8279                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8280                                               GFP_KERNEL);
8281                 if (!tpr->rx_jmb_buffers)
8282                         goto err_out;
8283
8284                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8285                                                  TG3_RX_JMB_RING_BYTES(tp),
8286                                                  &tpr->rx_jmb_mapping,
8287                                                  GFP_KERNEL);
8288                 if (!tpr->rx_jmb)
8289                         goto err_out;
8290         }
8291
8292         return 0;
8293
8294 err_out:
8295         tg3_rx_prodring_fini(tp, tpr);
8296         return -ENOMEM;
8297 }
8298
8299 /* Free up pending packets in all rx/tx rings.
8300  *
8301  * The chip has been shut down and the driver detached from
8302  * the networking, so no interrupts or new tx packets will
8303  * the network stack, so no interrupts or new tx packets will
8304  * in an interrupt context and thus may sleep.
8305  */
8306 static void tg3_free_rings(struct tg3 *tp)
8307 {
8308         int i, j;
8309
8310         for (j = 0; j < tp->irq_cnt; j++) {
8311                 struct tg3_napi *tnapi = &tp->napi[j];
8312
8313                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8314
8315                 if (!tnapi->tx_buffers)
8316                         continue;
8317
8318                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8319                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8320
8321                         if (!skb)
8322                                 continue;
8323
8324                         tg3_tx_skb_unmap(tnapi, i,
8325                                          skb_shinfo(skb)->nr_frags - 1);
8326
8327                         dev_kfree_skb_any(skb);
8328                 }
8329                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8330         }
8331 }
8332
8333 /* Initialize tx/rx rings for packet processing.
8334  *
8335  * The chip has been shut down and the driver detached from
8336  * the network stack, so no interrupts or new tx packets will
8337  * end up in the driver.  tp->{tx,}lock are held and thus
8338  * we may not sleep.
8339  */
8340 static int tg3_init_rings(struct tg3 *tp)
8341 {
8342         int i;
8343
8344         /* Free up all the SKBs. */
8345         tg3_free_rings(tp);
8346
8347         for (i = 0; i < tp->irq_cnt; i++) {
8348                 struct tg3_napi *tnapi = &tp->napi[i];
8349
8350                 tnapi->last_tag = 0;
8351                 tnapi->last_irq_tag = 0;
8352                 tnapi->hw_status->status = 0;
8353                 tnapi->hw_status->status_tag = 0;
8354                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8355
8356                 tnapi->tx_prod = 0;
8357                 tnapi->tx_cons = 0;
8358                 if (tnapi->tx_ring)
8359                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8360
8361                 tnapi->rx_rcb_ptr = 0;
8362                 if (tnapi->rx_rcb)
8363                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8364
8365                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8366                         tg3_free_rings(tp);
8367                         return -ENOMEM;
8368                 }
8369         }
8370
8371         return 0;
8372 }
8373
8374 static void tg3_mem_tx_release(struct tg3 *tp)
8375 {
8376         int i;
8377
8378         for (i = 0; i < tp->irq_max; i++) {
8379                 struct tg3_napi *tnapi = &tp->napi[i];
8380
8381                 if (tnapi->tx_ring) {
8382                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8383                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8384                         tnapi->tx_ring = NULL;
8385                 }
8386
8387                 kfree(tnapi->tx_buffers);
8388                 tnapi->tx_buffers = NULL;
8389         }
8390 }
8391
8392 static int tg3_mem_tx_acquire(struct tg3 *tp)
8393 {
8394         int i;
8395         struct tg3_napi *tnapi = &tp->napi[0];
8396
8397         /* If multivector TSS is enabled, vector 0 does not handle
8398          * tx interrupts.  Don't allocate any resources for it.
8399          */
8400         if (tg3_flag(tp, ENABLE_TSS))
8401                 tnapi++;
8402
8403         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8404                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8405                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8406                 if (!tnapi->tx_buffers)
8407                         goto err_out;
8408
8409                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8410                                                     TG3_TX_RING_BYTES,
8411                                                     &tnapi->tx_desc_mapping,
8412                                                     GFP_KERNEL);
8413                 if (!tnapi->tx_ring)
8414                         goto err_out;
8415         }
8416
8417         return 0;
8418
8419 err_out:
8420         tg3_mem_tx_release(tp);
8421         return -ENOMEM;
8422 }
8423
8424 static void tg3_mem_rx_release(struct tg3 *tp)
8425 {
8426         int i;
8427
8428         for (i = 0; i < tp->irq_max; i++) {
8429                 struct tg3_napi *tnapi = &tp->napi[i];
8430
8431                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8432
8433                 if (!tnapi->rx_rcb)
8434                         continue;
8435
8436                 dma_free_coherent(&tp->pdev->dev,
8437                                   TG3_RX_RCB_RING_BYTES(tp),
8438                                   tnapi->rx_rcb,
8439                                   tnapi->rx_rcb_mapping);
8440                 tnapi->rx_rcb = NULL;
8441         }
8442 }
8443
8444 static int tg3_mem_rx_acquire(struct tg3 *tp)
8445 {
8446         unsigned int i, limit;
8447
8448         limit = tp->rxq_cnt;
8449
8450         /* If RSS is enabled, we need a (dummy) producer ring
8451          * set on vector zero.  This is the true hw prodring.
8452          */
8453         if (tg3_flag(tp, ENABLE_RSS))
8454                 limit++;
8455
8456         for (i = 0; i < limit; i++) {
8457                 struct tg3_napi *tnapi = &tp->napi[i];
8458
8459                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8460                         goto err_out;
8461
8462                 /* If multivector RSS is enabled, vector 0
8463                  * does not handle rx or tx interrupts.
8464                  * Don't allocate any resources for it.
8465                  */
8466                 if (!i && tg3_flag(tp, ENABLE_RSS))
8467                         continue;
8468
8469                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8470                                                    TG3_RX_RCB_RING_BYTES(tp),
8471                                                    &tnapi->rx_rcb_mapping,
8472                                                    GFP_KERNEL | __GFP_ZERO);
8473                 if (!tnapi->rx_rcb)
8474                         goto err_out;
8475         }
8476
8477         return 0;
8478
8479 err_out:
8480         tg3_mem_rx_release(tp);
8481         return -ENOMEM;
8482 }
8483
8484 /*
8485  * Must not be invoked with interrupt sources disabled and
8486  * the hardware shut down.
8487  */
8488 static void tg3_free_consistent(struct tg3 *tp)
8489 {
8490         int i;
8491
8492         for (i = 0; i < tp->irq_cnt; i++) {
8493                 struct tg3_napi *tnapi = &tp->napi[i];
8494
8495                 if (tnapi->hw_status) {
8496                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8497                                           tnapi->hw_status,
8498                                           tnapi->status_mapping);
8499                         tnapi->hw_status = NULL;
8500                 }
8501         }
8502
8503         tg3_mem_rx_release(tp);
8504         tg3_mem_tx_release(tp);
8505
8506         if (tp->hw_stats) {
8507                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8508                                   tp->hw_stats, tp->stats_mapping);
8509                 tp->hw_stats = NULL;
8510         }
8511 }
8512
8513 /*
8514  * Must not be invoked with interrupt sources disabled and
8515  * the hardware shut down.  Can sleep.
8516  */
8517 static int tg3_alloc_consistent(struct tg3 *tp)
8518 {
8519         int i;
8520
8521         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8522                                           sizeof(struct tg3_hw_stats),
8523                                           &tp->stats_mapping,
8524                                           GFP_KERNEL | __GFP_ZERO);
8525         if (!tp->hw_stats)
8526                 goto err_out;
8527
8528         for (i = 0; i < tp->irq_cnt; i++) {
8529                 struct tg3_napi *tnapi = &tp->napi[i];
8530                 struct tg3_hw_status *sblk;
8531
8532                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8533                                                       TG3_HW_STATUS_SIZE,
8534                                                       &tnapi->status_mapping,
8535                                                       GFP_KERNEL | __GFP_ZERO);
8536                 if (!tnapi->hw_status)
8537                         goto err_out;
8538
8539                 sblk = tnapi->hw_status;
8540
8541                 if (tg3_flag(tp, ENABLE_RSS)) {
8542                         u16 *prodptr = NULL;
8543
8544                         /*
8545                          * When RSS is enabled, the status block format changes
8546                          * slightly.  The "rx_jumbo_consumer", "reserved",
8547                          * and "rx_mini_consumer" members get mapped to the
8548                          * other three rx return ring producer indexes.
8549                          */
8550                         switch (i) {
8551                         case 1:
8552                                 prodptr = &sblk->idx[0].rx_producer;
8553                                 break;
8554                         case 2:
8555                                 prodptr = &sblk->rx_jumbo_consumer;
8556                                 break;
8557                         case 3:
8558                                 prodptr = &sblk->reserved;
8559                                 break;
8560                         case 4:
8561                                 prodptr = &sblk->rx_mini_consumer;
8562                                 break;
8563                         }
8564                         tnapi->rx_rcb_prod_idx = prodptr;
8565                 } else {
8566                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8567                 }
8568         }
8569
8570         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8571                 goto err_out;
8572
8573         return 0;
8574
8575 err_out:
8576         tg3_free_consistent(tp);
8577         return -ENOMEM;
8578 }
8579
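/* A sketch (guarded out with #if 0) of the RSS producer-index mapping
 * established in tg3_alloc_consistent() above: with RSS enabled,
 * vectors 2-4 reuse status block fields that carry legacy names in the
 * non-RSS layout.  "example_rss_prodptr" is a hypothetical name.
 */
#if 0
static u16 *example_rss_prodptr(struct tg3_hw_status *sblk, int vec)
{
        switch (vec) {
        case 1: return &sblk->idx[0].rx_producer;  /* rx return ring 1 */
        case 2: return &sblk->rx_jumbo_consumer;   /* rx return ring 2 */
        case 3: return &sblk->reserved;            /* rx return ring 3 */
        case 4: return &sblk->rx_mini_consumer;    /* rx return ring 4 */
        }
        return NULL;                               /* vector 0: no rx/tx */
}
#endif
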
8580 #define MAX_WAIT_CNT 1000
8581
8582 /* To stop a block, clear the enable bit and poll till it
8583  * clears.  tp->lock is held.
8584  */
8585 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8586 {
8587         unsigned int i;
8588         u32 val;
8589
8590         if (tg3_flag(tp, 5705_PLUS)) {
8591                 switch (ofs) {
8592                 case RCVLSC_MODE:
8593                 case DMAC_MODE:
8594                 case MBFREE_MODE:
8595                 case BUFMGR_MODE:
8596                 case MEMARB_MODE:
8597                         /* We can't enable/disable these bits on the
8598                          * 5705/5750, so just report success.
8599                          */
8600                         return 0;
8601
8602                 default:
8603                         break;
8604                 }
8605         }
8606
8607         val = tr32(ofs);
8608         val &= ~enable_bit;
8609         tw32_f(ofs, val);
8610
8611         for (i = 0; i < MAX_WAIT_CNT; i++) {
8612                 if (pci_channel_offline(tp->pdev)) {
8613                         dev_err(&tp->pdev->dev,
8614                                 "tg3_stop_block device offline, "
8615                                 "ofs=%lx enable_bit=%x\n",
8616                                 ofs, enable_bit);
8617                         return -ENODEV;
8618                 }
8619
8620                 udelay(100);
8621                 val = tr32(ofs);
8622                 if ((val & enable_bit) == 0)
8623                         break;
8624         }
8625
8626         if (i == MAX_WAIT_CNT && !silent) {
8627                 dev_err(&tp->pdev->dev,
8628                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8629                         ofs, enable_bit);
8630                 return -ENODEV;
8631         }
8632
8633         return 0;
8634 }
8635
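/* The clear-and-poll shape used by tg3_stop_block() above, reduced to
 * its essentials as an illustrative, uncompiled sketch
 * ("example_clear_and_poll" is a hypothetical name).
 */
#if 0
static int example_clear_and_poll(struct tg3 *tp, unsigned long ofs, u32 bit)
{
        unsigned int i;

        tw32_f(ofs, tr32(ofs) & ~bit);          /* clear bit, flush write */

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(ofs) & bit))
                        return 0;               /* hardware acknowledged */
        }
        return -ENODEV;                         /* timed out */
}
#endif
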
8636 /* tp->lock is held. */
8637 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8638 {
8639         int i, err;
8640
8641         tg3_disable_ints(tp);
8642
8643         if (pci_channel_offline(tp->pdev)) {
8644                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8645                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8646                 err = -ENODEV;
8647                 goto err_no_dev;
8648         }
8649
8650         tp->rx_mode &= ~RX_MODE_ENABLE;
8651         tw32_f(MAC_RX_MODE, tp->rx_mode);
8652         udelay(10);
8653
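        /* Each tg3_stop_block() call below returns 0 or -ENODEV, so
         * OR-accumulating the results loses no information.
         */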
8654         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8655         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8656         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8657         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8658         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8659         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8660
8661         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8662         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8663         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8664         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8665         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8666         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8667         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8668
8669         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8670         tw32_f(MAC_MODE, tp->mac_mode);
8671         udelay(40);
8672
8673         tp->tx_mode &= ~TX_MODE_ENABLE;
8674         tw32_f(MAC_TX_MODE, tp->tx_mode);
8675
8676         for (i = 0; i < MAX_WAIT_CNT; i++) {
8677                 udelay(100);
8678                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8679                         break;
8680         }
8681         if (i >= MAX_WAIT_CNT) {
8682                 dev_err(&tp->pdev->dev,
8683                         "%s timed out, TX_MODE_ENABLE will not clear "
8684                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8685                 err |= -ENODEV;
8686         }
8687
8688         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8689         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8690         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8691
8692         tw32(FTQ_RESET, 0xffffffff);
8693         tw32(FTQ_RESET, 0x00000000);
8694
8695         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8696         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8697
8698 err_no_dev:
8699         for (i = 0; i < tp->irq_cnt; i++) {
8700                 struct tg3_napi *tnapi = &tp->napi[i];
8701                 if (tnapi->hw_status)
8702                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8703         }
8704
8705         return err;
8706 }
8707
8708 /* Save PCI command register before chip reset */
8709 static void tg3_save_pci_state(struct tg3 *tp)
8710 {
8711         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8712 }
8713
8714 /* Restore PCI state after chip reset */
8715 static void tg3_restore_pci_state(struct tg3 *tp)
8716 {
8717         u32 val;
8718
8719         /* Re-enable indirect register accesses. */
8720         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8721                                tp->misc_host_ctrl);
8722
8723         /* Set MAX PCI retry to zero. */
8724         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8725         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8726             tg3_flag(tp, PCIX_MODE))
8727                 val |= PCISTATE_RETRY_SAME_DMA;
8728         /* Allow reads and writes to the APE register and memory space. */
8729         if (tg3_flag(tp, ENABLE_APE))
8730                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8731                        PCISTATE_ALLOW_APE_SHMEM_WR |
8732                        PCISTATE_ALLOW_APE_PSPACE_WR;
8733         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8734
8735         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8736
8737         if (!tg3_flag(tp, PCI_EXPRESS)) {
8738                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8739                                       tp->pci_cacheline_sz);
8740                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8741                                       tp->pci_lat_timer);
8742         }
8743
8744         /* Make sure PCI-X relaxed ordering bit is clear. */
8745         if (tg3_flag(tp, PCIX_MODE)) {
8746                 u16 pcix_cmd;
8747
8748                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8749                                      &pcix_cmd);
8750                 pcix_cmd &= ~PCI_X_CMD_ERO;
8751                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8752                                       pcix_cmd);
8753         }
8754
8755         if (tg3_flag(tp, 5780_CLASS)) {
8756
8757                 /* Chip reset on 5780 will reset the MSI enable bit,
8758                  * so we need to restore it.
8759                  */
8760                 if (tg3_flag(tp, USING_MSI)) {
8761                         u16 ctrl;
8762
8763                         pci_read_config_word(tp->pdev,
8764                                              tp->msi_cap + PCI_MSI_FLAGS,
8765                                              &ctrl);
8766                         pci_write_config_word(tp->pdev,
8767                                               tp->msi_cap + PCI_MSI_FLAGS,
8768                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8769                         val = tr32(MSGINT_MODE);
8770                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8771                 }
8772         }
8773 }
8774
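/* Usage sketch for the two helpers above (mirrors tg3_chip_reset()
 * below; the elided middle step is the GRC_MISC_CFG core-clock reset):
 */
#if 0
        tg3_save_pci_state(tp);         /* before the core-clock reset */
        /* ... issue GRC_MISC_CFG_CORECLK_RESET, wait for the chip ... */
        tg3_restore_pci_state(tp);      /* once config space responds */
#endif
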
8775 /* tp->lock is held. */
8776 static int tg3_chip_reset(struct tg3 *tp)
8777 {
8778         u32 val;
8779         void (*write_op)(struct tg3 *, u32, u32);
8780         int i, err;
8781
8782         tg3_nvram_lock(tp);
8783
8784         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8785
8786         /* No matching tg3_nvram_unlock() after this because
8787          * the chip reset below will undo the nvram lock.
8788          */
8789         tp->nvram_lock_cnt = 0;
8790
8791         /* GRC_MISC_CFG core clock reset will clear the memory
8792          * enable bit in PCI register 4 and the MSI enable bit
8793          * on some chips, so we save relevant registers here.
8794          */
8795         tg3_save_pci_state(tp);
8796
8797         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8798             tg3_flag(tp, 5755_PLUS))
8799                 tw32(GRC_FASTBOOT_PC, 0);
8800
8801         /*
8802          * We must avoid the readl() that normally takes place.
8803          * It locks up machines, causes machine checks, and other
8804          * fun things.  So, temporarily disable the 5701
8805          * hardware workaround, while we do the reset.
8806          */
8807         write_op = tp->write32;
8808         if (write_op == tg3_write_flush_reg32)
8809                 tp->write32 = tg3_write32;
8810
8811         /* Prevent the irq handler from reading or writing PCI registers
8812          * during chip reset when the memory enable bit in the PCI command
8813          * register may be cleared.  The chip does not generate interrupts
8814          * at this time, but the irq handler may still be called due to irq
8815          * sharing or irqpoll.
8816          */
8817         tg3_flag_set(tp, CHIP_RESETTING);
8818         for (i = 0; i < tp->irq_cnt; i++) {
8819                 struct tg3_napi *tnapi = &tp->napi[i];
8820                 if (tnapi->hw_status) {
8821                         tnapi->hw_status->status = 0;
8822                         tnapi->hw_status->status_tag = 0;
8823                 }
8824                 tnapi->last_tag = 0;
8825                 tnapi->last_irq_tag = 0;
8826         }
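        /* Pair the tag clearing above with a barrier so the irq
         * handlers synchronized below observe the zeroed state.
         */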
8827         smp_mb();
8828
8829         for (i = 0; i < tp->irq_cnt; i++)
8830                 synchronize_irq(tp->napi[i].irq_vec);
8831
8832         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8833                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8834                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8835         }
8836
8837         /* do the reset */
8838         val = GRC_MISC_CFG_CORECLK_RESET;
8839
8840         if (tg3_flag(tp, PCI_EXPRESS)) {
8841                 /* Force PCIe 1.0a mode */
8842                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8843                     !tg3_flag(tp, 57765_PLUS) &&
8844                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8845                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8846                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8847
8848                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8849                         tw32(GRC_MISC_CFG, (1 << 29));
8850                         val |= (1 << 29);
8851                 }
8852         }
8853
8854         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8855                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8856                 tw32(GRC_VCPU_EXT_CTRL,
8857                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8858         }
8859
8860         /* Manage gphy power for all CPMU-absent PCIe devices. */
8861         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8862                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8863
8864         tw32(GRC_MISC_CFG, val);
8865
8866         /* restore 5701 hardware bug workaround write method */
8867         tp->write32 = write_op;
8868
8869         /* Unfortunately, we have to delay before the PCI read back.
8870          * Some 575X chips will not even respond to a PCI cfg access
8871          * when the reset command is given to the chip.
8872          *
8873          * How do these hardware designers expect things to work
8874          * properly if the PCI write is posted for a long period
8875          * of time?  It is always necessary to have some method by
8876          * which a register read back can occur to push out the
8877          * write that performs the reset.
8878          *
8879          * For most tg3 variants the trick below has worked.
8880          * Ho hum...
8881          */
8882         udelay(120);
8883
8884         /* Flush PCI posted writes.  The normal MMIO registers
8885          * are inaccessible at this time so this is the only
8886          * way to do this reliably (actually, this is no longer
8887          * the case, see above).  I tried to use indirect
8888          * register read/write but this upset some 5701 variants.
8889          */
8890         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8891
8892         udelay(120);
8893
8894         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8895                 u16 val16;
8896
8897                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8898                         int j;
8899                         u32 cfg_val;
8900
8901                         /* Wait for link training to complete.  */
8902                         for (j = 0; j < 5000; j++)
8903                                 udelay(100);
8904
8905                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8906                         pci_write_config_dword(tp->pdev, 0xc4,
8907                                                cfg_val | (1 << 15));
8908                 }
8909
8910                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8911                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8912                 /*
8913                  * Older PCIe devices only support the 128 byte
8914                  * MPS setting.  Enforce the restriction.
8915                  */
8916                 if (!tg3_flag(tp, CPMU_PRESENT))
8917                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8918                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8919
8920                 /* Clear error status */
8921                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8922                                       PCI_EXP_DEVSTA_CED |
8923                                       PCI_EXP_DEVSTA_NFED |
8924                                       PCI_EXP_DEVSTA_FED |
8925                                       PCI_EXP_DEVSTA_URD);
8926         }
8927
8928         tg3_restore_pci_state(tp);
8929
8930         tg3_flag_clear(tp, CHIP_RESETTING);
8931         tg3_flag_clear(tp, ERROR_PROCESSED);
8932
8933         val = 0;
8934         if (tg3_flag(tp, 5780_CLASS))
8935                 val = tr32(MEMARB_MODE);
8936         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8937
8938         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8939                 tg3_stop_fw(tp);
8940                 tw32(0x5000, 0x400);
8941         }
8942
8943         if (tg3_flag(tp, IS_SSB_CORE)) {
8944                 /*
8945                  * BCM4785: To avoid repercussions from using the
8946                  * potentially defective internal ROM, stop the Rx RISC CPU,
8947                  * which is not required for operation.
8948                  */
8949                 tg3_stop_fw(tp);
8950                 tg3_halt_cpu(tp, RX_CPU_BASE);
8951         }
8952
8953         err = tg3_poll_fw(tp);
8954         if (err)
8955                 return err;
8956
8957         tw32(GRC_MODE, tp->grc_mode);
8958
8959         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8960                 val = tr32(0xc4);
8961
8962                 tw32(0xc4, val | (1 << 15));
8963         }
8964
8965         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8966             tg3_asic_rev(tp) == ASIC_REV_5705) {
8967                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8968                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8969                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8970                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8971         }
8972
8973         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8974                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8975                 val = tp->mac_mode;
8976         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8977                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8978                 val = tp->mac_mode;
8979         } else
8980                 val = 0;
8981
8982         tw32_f(MAC_MODE, val);
8983         udelay(40);
8984
8985         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8986
8987         tg3_mdio_start(tp);
8988
8989         if (tg3_flag(tp, PCI_EXPRESS) &&
8990             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8991             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8992             !tg3_flag(tp, 57765_PLUS)) {
8993                 val = tr32(0x7c00);
8994
8995                 tw32(0x7c00, val | (1 << 25));
8996         }
8997
8998         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8999                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9000                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9001         }
9002
9003         /* Reprobe ASF enable state.  */
9004         tg3_flag_clear(tp, ENABLE_ASF);
9005         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9006                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9007
9008         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9009         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9010         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9011                 u32 nic_cfg;
9012
9013                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9014                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9015                         tg3_flag_set(tp, ENABLE_ASF);
9016                         tp->last_event_jiffies = jiffies;
9017                         if (tg3_flag(tp, 5750_PLUS))
9018                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9019
9020                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9021                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9022                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9023                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9024                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9025                 }
9026         }
9027
9028         return 0;
9029 }
9030
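/* Ordering summary for tg3_chip_reset() above, as implemented: save
 * PCI state, quiesce the irq handlers, issue the GRC_MISC_CFG core
 * clock reset, delay until config space responds, restore PCI state,
 * poll the firmware, then reprobe the ASF configuration from SRAM.
 */
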
9031 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9032 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9033
9034 /* tp->lock is held. */
9035 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9036 {
9037         int err;
9038
9039         tg3_stop_fw(tp);
9040
9041         tg3_write_sig_pre_reset(tp, kind);
9042
9043         tg3_abort_hw(tp, silent);
9044         err = tg3_chip_reset(tp);
9045
9046         __tg3_set_mac_addr(tp, false);
9047
9048         tg3_write_sig_legacy(tp, kind);
9049         tg3_write_sig_post_reset(tp, kind);
9050
9051         if (tp->hw_stats) {
9052                 /* Save the stats across chip resets... */
9053                 tg3_get_nstats(tp, &tp->net_stats_prev);
9054                 tg3_get_estats(tp, &tp->estats_prev);
9055
9056                 /* And make sure the next sample is new data */
9057                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9058         }
9059
9060         if (err)
9061                 return err;
9062
9063         return 0;
9064 }
9065
9066 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9067 {
9068         struct tg3 *tp = netdev_priv(dev);
9069         struct sockaddr *addr = p;
9070         int err = 0;
9071         bool skip_mac_1 = false;
9072
9073         if (!is_valid_ether_addr(addr->sa_data))
9074                 return -EADDRNOTAVAIL;
9075
9076         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9077
9078         if (!netif_running(dev))
9079                 return 0;
9080
9081         if (tg3_flag(tp, ENABLE_ASF)) {
9082                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9083
9084                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9085                 addr0_low = tr32(MAC_ADDR_0_LOW);
9086                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9087                 addr1_low = tr32(MAC_ADDR_1_LOW);
9088
9089                 /* Skip MAC addr 1 if ASF is using it. */
9090                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9091                     !(addr1_high == 0 && addr1_low == 0))
9092                         skip_mac_1 = true;
9093         }
9094         spin_lock_bh(&tp->lock);
9095         __tg3_set_mac_addr(tp, skip_mac_1);
9096         spin_unlock_bh(&tp->lock);
9097
9098         return err;
9099 }
9100
9101 /* tp->lock is held. */
9102 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9103                            dma_addr_t mapping, u32 maxlen_flags,
9104                            u32 nic_addr)
9105 {
9106         tg3_write_mem(tp,
9107                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9108                       ((u64) mapping >> 32));
9109         tg3_write_mem(tp,
9110                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9111                       ((u64) mapping & 0xffffffff));
9112         tg3_write_mem(tp,
9113                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9114                        maxlen_flags);
9115
9116         if (!tg3_flag(tp, 5705_PLUS))
9117                 tg3_write_mem(tp,
9118                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9119                               nic_addr);
9120 }
9121
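/* Worked example for the address split above: a 64-bit mapping of
 * 0x0000000123456789 is written as TG3_64BIT_REG_HIGH = 0x00000001
 * and TG3_64BIT_REG_LOW = 0x23456789.
 */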
9122
9123 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9124 {
9125         int i = 0;
9126
9127         if (!tg3_flag(tp, ENABLE_TSS)) {
9128                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9129                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9130                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9131         } else {
9132                 tw32(HOSTCC_TXCOL_TICKS, 0);
9133                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9134                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9135
9136                 for (; i < tp->txq_cnt; i++) {
9137                         u32 reg;
9138
9139                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9140                         tw32(reg, ec->tx_coalesce_usecs);
9141                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9142                         tw32(reg, ec->tx_max_coalesced_frames);
9143                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9144                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9145                 }
9146         }
9147
9148         for (; i < tp->irq_max - 1; i++) {
9149                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9150                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9151                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9152         }
9153 }
9154
9155 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9156 {
9157         int i = 0;
9158         u32 limit = tp->rxq_cnt;
9159
9160         if (!tg3_flag(tp, ENABLE_RSS)) {
9161                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9162                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9163                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9164                 limit--;
9165         } else {
9166                 tw32(HOSTCC_RXCOL_TICKS, 0);
9167                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9168                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9169         }
9170
9171         for (; i < limit; i++) {
9172                 u32 reg;
9173
9174                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9175                 tw32(reg, ec->rx_coalesce_usecs);
9176                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9177                 tw32(reg, ec->rx_max_coalesced_frames);
9178                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9179                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9180         }
9181
9182         for (; i < tp->irq_max - 1; i++) {
9183                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9184                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9185                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9186         }
9187 }
9188
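/* In both coalescing helpers above the per-vector registers sit at a
 * 0x18-byte stride, and loop index i addresses vector i + 1: e.g.
 * HOSTCC_RXCOL_TICKS_VEC1 + 1 * 0x18 is vector 2's rx tick register.
 */
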
9189 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9190 {
9191         tg3_coal_tx_init(tp, ec);
9192         tg3_coal_rx_init(tp, ec);
9193
9194         if (!tg3_flag(tp, 5705_PLUS)) {
9195                 u32 val = ec->stats_block_coalesce_usecs;
9196
9197                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9198                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9199
9200                 if (!tp->link_up)
9201                         val = 0;
9202
9203                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9204         }
9205 }
9206
9207 /* tp->lock is held. */
9208 static void tg3_rings_reset(struct tg3 *tp)
9209 {
9210         int i;
9211         u32 stblk, txrcb, rxrcb, limit;
9212         struct tg3_napi *tnapi = &tp->napi[0];
9213
9214         /* Disable all transmit rings but the first. */
9215         if (!tg3_flag(tp, 5705_PLUS))
9216                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9217         else if (tg3_flag(tp, 5717_PLUS))
9218                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9219         else if (tg3_flag(tp, 57765_CLASS) ||
9220                  tg3_asic_rev(tp) == ASIC_REV_5762)
9221                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9222         else
9223                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9224
9225         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9226              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9227                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9228                               BDINFO_FLAGS_DISABLED);
9229
9230
9231         /* Disable all receive return rings but the first. */
9232         if (tg3_flag(tp, 5717_PLUS))
9233                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9234         else if (!tg3_flag(tp, 5705_PLUS))
9235                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9236         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9237                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9238                  tg3_flag(tp, 57765_CLASS))
9239                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9240         else
9241                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9242
9243         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9244              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9245                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9246                               BDINFO_FLAGS_DISABLED);
9247
9248         /* Disable interrupts */
9249         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9250         tp->napi[0].chk_msi_cnt = 0;
9251         tp->napi[0].last_rx_cons = 0;
9252         tp->napi[0].last_tx_cons = 0;
9253
9254         /* Zero mailbox registers. */
9255         if (tg3_flag(tp, SUPPORT_MSIX)) {
9256                 for (i = 1; i < tp->irq_max; i++) {
9257                         tp->napi[i].tx_prod = 0;
9258                         tp->napi[i].tx_cons = 0;
9259                         if (tg3_flag(tp, ENABLE_TSS))
9260                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9261                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9262                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9263                         tp->napi[i].chk_msi_cnt = 0;
9264                         tp->napi[i].last_rx_cons = 0;
9265                         tp->napi[i].last_tx_cons = 0;
9266                 }
9267                 if (!tg3_flag(tp, ENABLE_TSS))
9268                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9269         } else {
9270                 tp->napi[0].tx_prod = 0;
9271                 tp->napi[0].tx_cons = 0;
9272                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9273                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9274         }
9275
9276         /* Make sure the NIC-based send BD rings are disabled. */
9277         if (!tg3_flag(tp, 5705_PLUS)) {
9278                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9279                 for (i = 0; i < 16; i++)
9280                         tw32_tx_mbox(mbox + i * 8, 0);
9281         }
9282
9283         txrcb = NIC_SRAM_SEND_RCB;
9284         rxrcb = NIC_SRAM_RCV_RET_RCB;
9285
9286         /* Clear status block in ram. */
9287         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9288
9289         /* Set status block DMA address */
9290         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9291              ((u64) tnapi->status_mapping >> 32));
9292         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9293              ((u64) tnapi->status_mapping & 0xffffffff));
9294
9295         if (tnapi->tx_ring) {
9296                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9297                                (TG3_TX_RING_SIZE <<
9298                                 BDINFO_FLAGS_MAXLEN_SHIFT),
9299                                NIC_SRAM_TX_BUFFER_DESC);
9300                 txrcb += TG3_BDINFO_SIZE;
9301         }
9302
9303         if (tnapi->rx_rcb) {
9304                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9305                                (tp->rx_ret_ring_mask + 1) <<
9306                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9307                 rxrcb += TG3_BDINFO_SIZE;
9308         }
9309
9310         stblk = HOSTCC_STATBLCK_RING1;
9311
9312         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9313                 u64 mapping = (u64)tnapi->status_mapping;
9314                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9315                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9316
9317                 /* Clear status block in ram. */
9318                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9319
9320                 if (tnapi->tx_ring) {
9321                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9322                                        (TG3_TX_RING_SIZE <<
9323                                         BDINFO_FLAGS_MAXLEN_SHIFT),
9324                                        NIC_SRAM_TX_BUFFER_DESC);
9325                         txrcb += TG3_BDINFO_SIZE;
9326                 }
9327
9328                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9329                                ((tp->rx_ret_ring_mask + 1) <<
9330                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9331
9332                 stblk += 8;
9333                 rxrcb += TG3_BDINFO_SIZE;
9334         }
9335 }
9336
9337 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9338 {
9339         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9340
9341         if (!tg3_flag(tp, 5750_PLUS) ||
9342             tg3_flag(tp, 5780_CLASS) ||
9343             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9344             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9345             tg3_flag(tp, 57765_PLUS))
9346                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9347         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9348                  tg3_asic_rev(tp) == ASIC_REV_5787)
9349                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9350         else
9351                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9352
9353         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9354         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9355
9356         val = min(nic_rep_thresh, host_rep_thresh);
9357         tw32(RCVBDI_STD_THRESH, val);
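        /* Example: with rx_pending == 200, host_rep_thresh is
         * max(200 / 8, 1) == 25, so RCVBDI_STD_THRESH is programmed
         * with min(nic_rep_thresh, 25).
         */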
9358
9359         if (tg3_flag(tp, 57765_PLUS))
9360                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9361
9362         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9363                 return;
9364
9365         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9366
9367         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9368
9369         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9370         tw32(RCVBDI_JUMBO_THRESH, val);
9371
9372         if (tg3_flag(tp, 57765_PLUS))
9373                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9374 }
9375
9376 static inline u32 calc_crc(unsigned char *buf, int len)
9377 {
9378         u32 reg;
9379         u32 tmp;
9380         int j, k;
9381
9382         reg = 0xffffffff;
9383
9384         for (j = 0; j < len; j++) {
9385                 reg ^= buf[j];
9386
9387                 for (k = 0; k < 8; k++) {
9388                         tmp = reg & 0x01;
9389
9390                         reg >>= 1;
9391
9392                         if (tmp)
9393                                 reg ^= 0xedb88320;
9394                 }
9395         }
9396
9397         return ~reg;
9398 }
9399
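/* calc_crc() above is the bit-serial form of the standard reflected
 * CRC-32 (polynomial 0xedb88320, initial value ~0, final complement),
 * i.e. the same CRC the Ethernet FCS uses.  Sketch of the hash lookup
 * it feeds (an uncompiled fragment mirroring the multicast loop in
 * __tg3_set_rx_mode() below):
 */
#if 0
        u32 crc = calc_crc(ha->addr, ETH_ALEN);
        u32 bit = ~crc & 0x7f;                 /* 7-bit hash, 128 bits */
        u32 regidx = (bit & 0x60) >> 5;        /* MAC_HASH_REG_0..3 */

        mc_filter[regidx] |= 1 << (bit & 0x1f);
#endif
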
9400 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9401 {
9402         /* accept or reject all multicast frames */
9403         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9404         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9405         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9406         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9407 }
9408
9409 static void __tg3_set_rx_mode(struct net_device *dev)
9410 {
9411         struct tg3 *tp = netdev_priv(dev);
9412         u32 rx_mode;
9413
9414         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9415                                   RX_MODE_KEEP_VLAN_TAG);
9416
9417 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9418         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9419          * flag clear.
9420          */
9421         if (!tg3_flag(tp, ENABLE_ASF))
9422                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9423 #endif
9424
9425         if (dev->flags & IFF_PROMISC) {
9426                 /* Promiscuous mode. */
9427                 rx_mode |= RX_MODE_PROMISC;
9428         } else if (dev->flags & IFF_ALLMULTI) {
9429                 /* Accept all multicast. */
9430                 tg3_set_multi(tp, 1);
9431         } else if (netdev_mc_empty(dev)) {
9432                 /* Reject all multicast. */
9433                 tg3_set_multi(tp, 0);
9434         } else {
9435                 /* Accept one or more multicast(s). */
9436                 struct netdev_hw_addr *ha;
9437                 u32 mc_filter[4] = { 0, };
9438                 u32 regidx;
9439                 u32 bit;
9440                 u32 crc;
9441
9442                 netdev_for_each_mc_addr(ha, dev) {
9443                         crc = calc_crc(ha->addr, ETH_ALEN);
9444                         bit = ~crc & 0x7f;
9445                         regidx = (bit & 0x60) >> 5;
9446                         bit &= 0x1f;
9447                         mc_filter[regidx] |= (1 << bit);
9448                 }
9449
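                /* Example: crc = 0xffffff80 gives ~crc & 0x7f = 0x7f,
                 * i.e. regidx 3, bit 31: the top bit of MAC_HASH_REG_3.
                 */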
9450                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9451                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9452                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9453                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9454         }
9455
9456         if (rx_mode != tp->rx_mode) {
9457                 tp->rx_mode = rx_mode;
9458                 tw32_f(MAC_RX_MODE, rx_mode);
9459                 udelay(10);
9460         }
9461 }
9462
9463 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9464 {
9465         int i;
9466
9467         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9468                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9469 }
9470
9471 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9472 {
9473         int i;
9474
9475         if (!tg3_flag(tp, SUPPORT_MSIX))
9476                 return;
9477
9478         if (tp->rxq_cnt == 1) {
9479                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9480                 return;
9481         }
9482
9483         /* Validate table against current IRQ count */
9484         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9485                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9486                         break;
9487         }
9488
9489         if (i != TG3_RSS_INDIR_TBL_SIZE)
9490                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9491 }
9492
9493 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9494 {
9495         int i = 0;
9496         u32 reg = MAC_RSS_INDIR_TBL_0;
9497
9498         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9499                 u32 val = tp->rss_ind_tbl[i];
9500                 i++;
9501                 for (; i % 8; i++) {
9502                         val <<= 4;
9503                         val |= tp->rss_ind_tbl[i];
9504                 }
9505                 tw32(reg, val);
9506                 reg += 4;
9507         }
9508 }
9509
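/* Packing example for tg3_rss_write_indir_tbl() above: entry i lands
 * in nibble 7 - (i % 8) of its register, most significant nibble
 * first.  With four rx queues the default table repeats 0,1,2,3, so
 * the first register written is 0x01230123.
 */
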
9510 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9511 {
9512         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9513                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9514         else
9515                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9516 }
9517
9518 /* tp->lock is held. */
9519 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9520 {
9521         u32 val, rdmac_mode;
9522         int i, err, limit;
9523         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9524
9525         tg3_disable_ints(tp);
9526
9527         tg3_stop_fw(tp);
9528
9529         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9530
9531         if (tg3_flag(tp, INIT_COMPLETE))
9532                 tg3_abort_hw(tp, 1);
9533
9534         /* Enable MAC control of LPI */
9535         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9536                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9537                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9538                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9539                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9540
9541                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9542
9543                 tw32_f(TG3_CPMU_EEE_CTRL,
9544                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9545
9546                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9547                       TG3_CPMU_EEEMD_LPI_IN_TX |
9548                       TG3_CPMU_EEEMD_LPI_IN_RX |
9549                       TG3_CPMU_EEEMD_EEE_ENABLE;
9550
9551                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9552                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9553
9554                 if (tg3_flag(tp, ENABLE_APE))
9555                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9556
9557                 tw32_f(TG3_CPMU_EEE_MODE, val);
9558
9559                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9560                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9561                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9562
9563                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9564                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9565                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9566         }
9567
9568         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9569             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9570                 tg3_phy_pull_config(tp);
9571                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9572         }
9573
9574         if (reset_phy)
9575                 tg3_phy_reset(tp);
9576
9577         err = tg3_chip_reset(tp);
9578         if (err)
9579                 return err;
9580
9581         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9582
9583         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9584                 val = tr32(TG3_CPMU_CTRL);
9585                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9586                 tw32(TG3_CPMU_CTRL, val);
9587
9588                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9589                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9590                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9591                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9592
9593                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9594                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9595                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9596                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9597
9598                 val = tr32(TG3_CPMU_HST_ACC);
9599                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9600                 val |= CPMU_HST_ACC_MACCLK_6_25;
9601                 tw32(TG3_CPMU_HST_ACC, val);
9602         }
9603
9604         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9605                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9606                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9607                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9608                 tw32(PCIE_PWR_MGMT_THRESH, val);
9609
9610                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9611                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9612
9613                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9614
9615                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9616                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9617         }
9618
9619         if (tg3_flag(tp, L1PLLPD_EN)) {
9620                 u32 grc_mode = tr32(GRC_MODE);
9621
9622                 /* Access the lower 1K of PL PCIE block registers. */
9623                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9624                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9625
9626                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9627                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9628                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9629
9630                 tw32(GRC_MODE, grc_mode);
9631         }
9632
9633         if (tg3_flag(tp, 57765_CLASS)) {
9634                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9635                         u32 grc_mode = tr32(GRC_MODE);
9636
9637                         /* Access the lower 1K of PL PCIE block registers. */
9638                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9639                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9640
9641                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9642                                    TG3_PCIE_PL_LO_PHYCTL5);
9643                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9644                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9645
9646                         tw32(GRC_MODE, grc_mode);
9647                 }
9648
9649                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9650                         u32 grc_mode;
9651
9652                         /* Fix transmit hangs */
9653                         val = tr32(TG3_CPMU_PADRNG_CTL);
9654                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9655                         tw32(TG3_CPMU_PADRNG_CTL, val);
9656
9657                         grc_mode = tr32(GRC_MODE);
9658
9659                         /* Access the lower 1K of DL PCIE block registers. */
9660                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9661                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9662
9663                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9664                                    TG3_PCIE_DL_LO_FTSMAX);
9665                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9666                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9667                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9668
9669                         tw32(GRC_MODE, grc_mode);
9670                 }
9671
9672                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9673                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9674                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9675                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9676         }
9677
9678         /* This works around an issue with Athlon chipsets on
9679          * B3 tigon3 silicon.  This bit has no effect on any
9680          * other revision.  But do not set this on PCI Express
9681          * chips and don't even touch the clocks if the CPMU is present.
9682          */
9683         if (!tg3_flag(tp, CPMU_PRESENT)) {
9684                 if (!tg3_flag(tp, PCI_EXPRESS))
9685                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9686                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9687         }
9688
9689         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9690             tg3_flag(tp, PCIX_MODE)) {
9691                 val = tr32(TG3PCI_PCISTATE);
9692                 val |= PCISTATE_RETRY_SAME_DMA;
9693                 tw32(TG3PCI_PCISTATE, val);
9694         }
9695
9696         if (tg3_flag(tp, ENABLE_APE)) {
9697                 /* Allow reads and writes to the
9698                  * APE register and memory space.
9699                  */
9700                 val = tr32(TG3PCI_PCISTATE);
9701                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9702                        PCISTATE_ALLOW_APE_SHMEM_WR |
9703                        PCISTATE_ALLOW_APE_PSPACE_WR;
9704                 tw32(TG3PCI_PCISTATE, val);
9705         }
9706
9707         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9708                 /* Enable some hw fixes.  */
9709                 val = tr32(TG3PCI_MSI_DATA);
9710                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9711                 tw32(TG3PCI_MSI_DATA, val);
9712         }
9713
9714         /* Descriptor ring init may make accesses to the
9715          * NIC SRAM area to setup the TX descriptors, so we
9716          * NIC SRAM area to set up the TX descriptors, so we
9717          * successfully reset.
9718          */
9719         err = tg3_init_rings(tp);
9720         if (err)
9721                 return err;
9722
9723         if (tg3_flag(tp, 57765_PLUS)) {
9724                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9725                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9726                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9727                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9728                 if (!tg3_flag(tp, 57765_CLASS) &&
9729                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9730                     tg3_asic_rev(tp) != ASIC_REV_5762)
9731                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9732                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9733         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9734                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9735                 /* This value is determined during the probe-time DMA
9736                  * engine test, tg3_test_dma().
9737                  */
9738                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9739         }
9740
9741         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9742                           GRC_MODE_4X_NIC_SEND_RINGS |
9743                           GRC_MODE_NO_TX_PHDR_CSUM |
9744                           GRC_MODE_NO_RX_PHDR_CSUM);
9745         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9746
9747         /* Pseudo-header checksum is done by hardware logic and not
9748          * the offload processors, so make the chip do the pseudo-
9749          * header checksums on receive.  For transmit it is more
9750          * convenient to do the pseudo-header checksum in software
9751          * as Linux does that on transmit for us in all cases.
9752          */
9753         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9754
9755         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9756         if (tp->rxptpctl)
9757                 tw32(TG3_RX_PTP_CTL,
9758                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9759
9760         if (tg3_flag(tp, PTP_CAPABLE))
9761                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9762
9763         tw32(GRC_MODE, tp->grc_mode | val);
9764
9765         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9766         val = tr32(GRC_MISC_CFG);
9767         val &= ~0xff;
9768         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9769         tw32(GRC_MISC_CFG, val);
9770
9771         /* Initialize MBUF/DESC pool. */
9772         if (tg3_flag(tp, 5750_PLUS)) {
9773                 /* Do nothing.  */
9774         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9775                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9776                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9777                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9778                 else
9779                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9780                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9781                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9782         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9783                 int fw_len;
9784
9785                 fw_len = tp->fw_len;
9786                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9787                 tw32(BUFMGR_MB_POOL_ADDR,
9788                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9789                 tw32(BUFMGR_MB_POOL_SIZE,
9790                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9791         }
9792
9793         if (tp->dev->mtu <= ETH_DATA_LEN) {
9794                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9795                      tp->bufmgr_config.mbuf_read_dma_low_water);
9796                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9797                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9798                 tw32(BUFMGR_MB_HIGH_WATER,
9799                      tp->bufmgr_config.mbuf_high_water);
9800         } else {
9801                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9802                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9803                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9804                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9805                 tw32(BUFMGR_MB_HIGH_WATER,
9806                      tp->bufmgr_config.mbuf_high_water_jumbo);
9807         }
9808         tw32(BUFMGR_DMA_LOW_WATER,
9809              tp->bufmgr_config.dma_low_water);
9810         tw32(BUFMGR_DMA_HIGH_WATER,
9811              tp->bufmgr_config.dma_high_water);
9812
9813         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9814         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9815                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9816         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9817             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9818             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9819                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9820         tw32(BUFMGR_MODE, val);
9821         for (i = 0; i < 2000; i++) {
9822                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9823                         break;
9824                 udelay(10);
9825         }
9826         if (i >= 2000) {
9827                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9828                 return -ENODEV;
9829         }
9830
9831         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9832                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9833
9834         tg3_setup_rxbd_thresholds(tp);
9835
9836         /* Initialize TG3_BDINFO's at:
9837          *  RCVDBDI_STD_BD:     standard eth size rx ring
9838          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9839          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9840          *
9841          * like so:
9842          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9843          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9844          *                              ring attribute flags
9845          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9846          *
9847          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9848          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9849          *
9850          * The size of each ring is fixed in the firmware, but the location is
9851          * configurable.
9852          */
9853         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9854              ((u64) tpr->rx_std_mapping >> 32));
9855         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9856              ((u64) tpr->rx_std_mapping & 0xffffffff));
9857         if (!tg3_flag(tp, 5717_PLUS))
9858                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9859                      NIC_SRAM_RX_BUFFER_DESC);
9860
9861         /* Disable the mini ring */
9862         if (!tg3_flag(tp, 5705_PLUS))
9863                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9864                      BDINFO_FLAGS_DISABLED);
9865
9866         /* Program the jumbo buffer descriptor ring control
9867          * blocks on those devices that have them.
9868          */
9869         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9870             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9871
9872                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9873                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9874                              ((u64) tpr->rx_jmb_mapping >> 32));
9875                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9876                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9877                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9878                               BDINFO_FLAGS_MAXLEN_SHIFT;
9879                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9880                              val | BDINFO_FLAGS_USE_EXT_RECV);
9881                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9882                             tg3_flag(tp, 57765_CLASS) ||
9883                             tg3_asic_rev(tp) == ASIC_REV_5762)
9884                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9885                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9886                 } else {
9887                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9888                              BDINFO_FLAGS_DISABLED);
9889                 }
9890
9891                 if (tg3_flag(tp, 57765_PLUS)) {
9892                         val = TG3_RX_STD_RING_SIZE(tp);
9893                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9894                         val |= (TG3_RX_STD_DMA_SZ << 2);
9895                 } else
9896                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9897         } else
9898                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9899
9900         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9901
9902         tpr->rx_std_prod_idx = tp->rx_pending;
9903         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9904
9905         tpr->rx_jmb_prod_idx =
9906                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9907         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9908
9909         tg3_rings_reset(tp);
9910
9911         /* Initialize MAC address and backoff seed. */
9912         __tg3_set_mac_addr(tp, false);
9913
9914         /* MTU + ethernet header + FCS + optional VLAN tag */
9915         tw32(MAC_RX_MTU_SIZE,
9916              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9917
9918         /* The slot time is changed by tg3_setup_phy if we
9919          * run at gigabit with half duplex.
9920          */
9921         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9922               (6 << TX_LENGTHS_IPG_SHIFT) |
9923               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9924
9925         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9926             tg3_asic_rev(tp) == ASIC_REV_5762)
9927                 val |= tr32(MAC_TX_LENGTHS) &
9928                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9929                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9930
9931         tw32(MAC_TX_LENGTHS, val);
9932
9933         /* Receive rules. */
9934         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9935         tw32(RCVLPC_CONFIG, 0x0181);
9936
9937         /* Calculate RDMAC_MODE setting early, we need it to determine
9938          * the RCVLPC_STATE_ENABLE mask.
9939          */
9940         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9941                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9942                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9943                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9944                       RDMAC_MODE_LNGREAD_ENAB);
9945
9946         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9947                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9948
9949         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9950             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9951             tg3_asic_rev(tp) == ASIC_REV_57780)
9952                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9953                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9954                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9955
9956         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9957             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9958                 if (tg3_flag(tp, TSO_CAPABLE) &&
9959                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9960                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9961                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9962                            !tg3_flag(tp, IS_5788)) {
9963                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9964                 }
9965         }
9966
9967         if (tg3_flag(tp, PCI_EXPRESS))
9968                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9969
9970         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9971                 tp->dma_limit = 0;
9972                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9973                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9974                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9975                 }
9976         }
9977
9978         if (tg3_flag(tp, HW_TSO_1) ||
9979             tg3_flag(tp, HW_TSO_2) ||
9980             tg3_flag(tp, HW_TSO_3))
9981                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9982
9983         if (tg3_flag(tp, 57765_PLUS) ||
9984             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9985             tg3_asic_rev(tp) == ASIC_REV_57780)
9986                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9987
9988         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9989             tg3_asic_rev(tp) == ASIC_REV_5762)
9990                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9991
9992         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9993             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9994             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9995             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9996             tg3_flag(tp, 57765_PLUS)) {
9997                 u32 tgtreg;
9998
9999                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10000                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10001                 else
10002                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10003
10004                 val = tr32(tgtreg);
10005                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10006                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10007                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10008                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10009                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10010                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10011                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10012                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10013                 }
10014                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10015         }
10016
10017         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10018             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10019             tg3_asic_rev(tp) == ASIC_REV_5762) {
10020                 u32 tgtreg;
10021
10022                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10023                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10024                 else
10025                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10026
10027                 val = tr32(tgtreg);
10028                 tw32(tgtreg, val |
10029                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10030                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10031         }
10032
10033         /* Receive/send statistics. */
10034         if (tg3_flag(tp, 5750_PLUS)) {
10035                 val = tr32(RCVLPC_STATS_ENABLE);
10036                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10037                 tw32(RCVLPC_STATS_ENABLE, val);
10038         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10039                    tg3_flag(tp, TSO_CAPABLE)) {
10040                 val = tr32(RCVLPC_STATS_ENABLE);
10041                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10042                 tw32(RCVLPC_STATS_ENABLE, val);
10043         } else {
10044                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10045         }
10046         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10047         tw32(SNDDATAI_STATSENAB, 0xffffff);
10048         tw32(SNDDATAI_STATSCTRL,
10049              (SNDDATAI_SCTRL_ENABLE |
10050               SNDDATAI_SCTRL_FASTUPD));
10051
10052         /* Setup host coalescing engine. */
10053         tw32(HOSTCC_MODE, 0);
10054         for (i = 0; i < 2000; i++) {
10055                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10056                         break;
10057                 udelay(10);
10058         }
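        /* The poll above gives the coalescing engine up to
         * 2000 iterations * 10 us = 20 ms to report itself disabled
         * before it is reprogrammed below.
         */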
10059
10060         __tg3_set_coalesce(tp, &tp->coal);
10061
10062         if (!tg3_flag(tp, 5705_PLUS)) {
10063                 /* Status/statistics block address.  See tg3_timer,
10064                  * the tg3_periodic_fetch_stats call there, and
10065                  * tg3_get_stats to see how this works for 5705/5750 chips.
10066                  */
10067                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10068                      ((u64) tp->stats_mapping >> 32));
10069                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10070                      ((u64) tp->stats_mapping & 0xffffffff));
10071                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10072
10073                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10074
10075                 /* Clear statistics and status block memory areas */
10076                 for (i = NIC_SRAM_STATS_BLK;
10077                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10078                      i += sizeof(u32)) {
10079                         tg3_write_mem(tp, i, 0);
10080                         udelay(40);
10081                 }
10082         }
10083
10084         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10085
10086         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10087         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10088         if (!tg3_flag(tp, 5705_PLUS))
10089                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10090
10091         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10092                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10093                 /* Reset to prevent intermittently losing the 1st rx packet */
10094                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10095                 udelay(10);
10096         }
10097
10098         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10099                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10100                         MAC_MODE_FHDE_ENABLE;
10101         if (tg3_flag(tp, ENABLE_APE))
10102                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10103         if (!tg3_flag(tp, 5705_PLUS) &&
10104             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10105             tg3_asic_rev(tp) != ASIC_REV_5700)
10106                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10107         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10108         udelay(40);
10109
10110         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10111          * If TG3_FLAG_IS_NIC is zero, we should read the
10112          * register to preserve the GPIO settings for LOMs. The GPIOs,
10113          * whether used as inputs or outputs, are set by boot code after
10114          * reset.
10115          */
10116         if (!tg3_flag(tp, IS_NIC)) {
10117                 u32 gpio_mask;
10118
10119                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10120                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10121                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10122
10123                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10124                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10125                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10126
10127                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10128                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10129
10130                 tp->grc_local_ctrl &= ~gpio_mask;
10131                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10132
10133                 /* GPIO1 must be driven high for eeprom write protect */
10134                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10135                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10136                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10137         }
10138         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10139         udelay(100);
10140
10141         if (tg3_flag(tp, USING_MSIX)) {
10142                 val = tr32(MSGINT_MODE);
10143                 val |= MSGINT_MODE_ENABLE;
10144                 if (tp->irq_cnt > 1)
10145                         val |= MSGINT_MODE_MULTIVEC_EN;
10146                 if (!tg3_flag(tp, 1SHOT_MSI))
10147                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10148                 tw32(MSGINT_MODE, val);
10149         }
10150
10151         if (!tg3_flag(tp, 5705_PLUS)) {
10152                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10153                 udelay(40);
10154         }
10155
10156         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10157                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10158                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10159                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10160                WDMAC_MODE_LNGREAD_ENAB);
10161
10162         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10163             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10164                 if (tg3_flag(tp, TSO_CAPABLE) &&
10165                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10166                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10167                         /* nothing */
10168                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10169                            !tg3_flag(tp, IS_5788)) {
10170                         val |= WDMAC_MODE_RX_ACCEL;
10171                 }
10172         }
10173
10174         /* Enable host coalescing bug fix */
10175         if (tg3_flag(tp, 5755_PLUS))
10176                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10177
10178         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10179                 val |= WDMAC_MODE_BURST_ALL_DATA;
10180
10181         tw32_f(WDMAC_MODE, val);
10182         udelay(40);
10183
10184         if (tg3_flag(tp, PCIX_MODE)) {
10185                 u16 pcix_cmd;
10186
10187                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10188                                      &pcix_cmd);
10189                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10190                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10191                         pcix_cmd |= PCI_X_CMD_READ_2K;
10192                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10193                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10194                         pcix_cmd |= PCI_X_CMD_READ_2K;
10195                 }
10196                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10197                                       pcix_cmd);
10198         }
10199
10200         tw32_f(RDMAC_MODE, rdmac_mode);
10201         udelay(40);
10202
10203         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10204             tg3_asic_rev(tp) == ASIC_REV_5720) {
10205                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10206                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10207                                 break;
10208                 }
10209                 if (i < TG3_NUM_RDMA_CHANNELS) {
10210                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10211                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10212                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10213                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10214                 }
10215         }
10216
10217         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10218         if (!tg3_flag(tp, 5705_PLUS))
10219                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10220
10221         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10222                 tw32(SNDDATAC_MODE,
10223                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10224         else
10225                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10226
10227         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10228         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10229         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10230         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10231                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10232         tw32(RCVDBDI_MODE, val);
10233         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10234         if (tg3_flag(tp, HW_TSO_1) ||
10235             tg3_flag(tp, HW_TSO_2) ||
10236             tg3_flag(tp, HW_TSO_3))
10237                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10238         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10239         if (tg3_flag(tp, ENABLE_TSS))
10240                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10241         tw32(SNDBDI_MODE, val);
10242         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10243
10244         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10245                 err = tg3_load_5701_a0_firmware_fix(tp);
10246                 if (err)
10247                         return err;
10248         }
10249
10250         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10251                 /* Ignore any errors for the firmware download. If download
10252          * fails, the device will operate with EEE disabled.
10253                  */
10254                 tg3_load_57766_firmware(tp);
10255         }
10256
10257         if (tg3_flag(tp, TSO_CAPABLE)) {
10258                 err = tg3_load_tso_firmware(tp);
10259                 if (err)
10260                         return err;
10261         }
10262
10263         tp->tx_mode = TX_MODE_ENABLE;
10264
10265         if (tg3_flag(tp, 5755_PLUS) ||
10266             tg3_asic_rev(tp) == ASIC_REV_5906)
10267                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10268
10269         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10270             tg3_asic_rev(tp) == ASIC_REV_5762) {
10271                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10272                 tp->tx_mode &= ~val;
10273                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10274         }
10275
10276         tw32_f(MAC_TX_MODE, tp->tx_mode);
10277         udelay(100);
10278
10279         if (tg3_flag(tp, ENABLE_RSS)) {
10280                 tg3_rss_write_indir_tbl(tp);
10281
10282                 /* Setup the "secret" hash key. */
10283                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10284                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10285                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10286                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10287                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10288                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10289                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10290                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10291                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10292                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10293         }
10294
10295         tp->rx_mode = RX_MODE_ENABLE;
10296         if (tg3_flag(tp, 5755_PLUS))
10297                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10298
10299         if (tg3_flag(tp, ENABLE_RSS))
10300                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10301                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10302                                RX_MODE_RSS_IPV6_HASH_EN |
10303                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10304                                RX_MODE_RSS_IPV4_HASH_EN |
10305                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10306
10307         tw32_f(MAC_RX_MODE, tp->rx_mode);
10308         udelay(10);
10309
10310         tw32(MAC_LED_CTRL, tp->led_ctrl);
10311
10312         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10313         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10314                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10315                 udelay(10);
10316         }
10317         tw32_f(MAC_RX_MODE, tp->rx_mode);
10318         udelay(10);
10319
10320         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10321                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10322                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10323                         /* Set drive transmission level to 1.2V  */
10324                         /* only if the signal pre-emphasis bit is not set  */
10325                         val = tr32(MAC_SERDES_CFG);
10326                         val &= 0xfffff000;
10327                         val |= 0x880;
10328                         tw32(MAC_SERDES_CFG, val);
10329                 }
10330                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10331                         tw32(MAC_SERDES_CFG, 0x616000);
10332         }
10333
10334         /* Prevent chip from dropping frames when flow control
10335          * is enabled.
10336          */
10337         if (tg3_flag(tp, 57765_CLASS))
10338                 val = 1;
10339         else
10340                 val = 2;
10341         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10342
10343         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10344             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10345                 /* Use hardware link auto-negotiation */
10346                 tg3_flag_set(tp, HW_AUTONEG);
10347         }
10348
10349         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10350             tg3_asic_rev(tp) == ASIC_REV_5714) {
10351                 u32 tmp;
10352
10353                 tmp = tr32(SERDES_RX_CTRL);
10354                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10355                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10356                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10357                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10358         }
10359
10360         if (!tg3_flag(tp, USE_PHYLIB)) {
10361                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10362                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10363
10364                 err = tg3_setup_phy(tp, false);
10365                 if (err)
10366                         return err;
10367
10368                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10369                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10370                         u32 tmp;
10371
10372                         /* Clear CRC stats. */
10373                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10374                                 tg3_writephy(tp, MII_TG3_TEST1,
10375                                              tmp | MII_TG3_TEST1_CRC_EN);
10376                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10377                         }
10378                 }
10379         }
10380
10381         __tg3_set_rx_mode(tp->dev);
10382
10383         /* Initialize receive rules. */
10384         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10385         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10386         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10387         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10388
10389         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10390                 limit = 8;
10391         else
10392                 limit = 16;
10393         if (tg3_flag(tp, ENABLE_ASF))
10394                 limit -= 4;
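        /* Clear the unused rules from the top down; each case below
         * deliberately falls through to clear the next lower slot.
         */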
10395         switch (limit) {
10396         case 16:
10397                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10398         case 15:
10399                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10400         case 14:
10401                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10402         case 13:
10403                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10404         case 12:
10405                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10406         case 11:
10407                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10408         case 10:
10409                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10410         case 9:
10411                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10412         case 8:
10413                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10414         case 7:
10415                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10416         case 6:
10417                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10418         case 5:
10419                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10420         case 4:
10421                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10422         case 3:
10423                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10424         case 2:
10425         case 1:
10426
10427         default:
10428                 break;
10429         }
10430
10431         if (tg3_flag(tp, ENABLE_APE))
10432                 /* Write our heartbeat update interval to APE. */
10433                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10434                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10435
10436         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10437
10438         return 0;
10439 }
10440
10441 /* Called at device open time to get the chip ready for
10442  * packet processing.  Invoked with tp->lock held.
10443  */
10444 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10445 {
10446         /* The chip may have just been powered on. If so, the boot code may
10447          * still be running initialization. Wait for it to finish to avoid
10448          * races in accessing the hardware.
10449          */
10450         tg3_enable_register_access(tp);
10451         tg3_poll_fw(tp);
10452
10453         tg3_switch_clocks(tp);
10454
10455         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10456
10457         return tg3_reset_hw(tp, reset_phy);
10458 }
10459
10460 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10461 {
10462         int i;
10463
10464         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10465                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10466
10467                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10468                 off += len;
10469
10470                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10471                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10472                         memset(ocir, 0, TG3_OCIR_LEN);
10473         }
10474 }
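/* A note on the scan above: record i lives at APE scratchpad offset
 * i * TG3_OCIR_LEN.  Records failing the signature or active-flag check
 * are zeroed in place, so tg3_hwmon_open() below can skip them simply by
 * testing src_data_length.
 */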
10475
10476 /* sysfs attributes for hwmon */
10477 static ssize_t tg3_show_temp(struct device *dev,
10478                              struct device_attribute *devattr, char *buf)
10479 {
10480         struct pci_dev *pdev = to_pci_dev(dev);
10481         struct net_device *netdev = pci_get_drvdata(pdev);
10482         struct tg3 *tp = netdev_priv(netdev);
10483         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10484         u32 temperature;
10485
10486         spin_lock_bh(&tp->lock);
10487         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10488                                 sizeof(temperature));
10489         spin_unlock_bh(&tp->lock);
10490         return sprintf(buf, "%u\n", temperature);
10491 }
10492
10493
10494 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10495                           TG3_TEMP_SENSOR_OFFSET);
10496 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10497                           TG3_TEMP_CAUTION_OFFSET);
10498 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10499                           TG3_TEMP_MAX_OFFSET);
10500
10501 static struct attribute *tg3_attributes[] = {
10502         &sensor_dev_attr_temp1_input.dev_attr.attr,
10503         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10504         &sensor_dev_attr_temp1_max.dev_attr.attr,
10505         NULL
10506 };
10507
10508 static const struct attribute_group tg3_group = {
10509         .attrs = tg3_attributes,
10510 };
10511
10512 static void tg3_hwmon_close(struct tg3 *tp)
10513 {
10514         if (tp->hwmon_dev) {
10515                 hwmon_device_unregister(tp->hwmon_dev);
10516                 tp->hwmon_dev = NULL;
10517                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10518         }
10519 }
10520
10521 static void tg3_hwmon_open(struct tg3 *tp)
10522 {
10523         int i, err;
10524         u32 size = 0;
10525         struct pci_dev *pdev = tp->pdev;
10526         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10527
10528         tg3_sd_scan_scratchpad(tp, ocirs);
10529
10530         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10531                 if (!ocirs[i].src_data_length)
10532                         continue;
10533
10534                 size += ocirs[i].src_hdr_length;
10535                 size += ocirs[i].src_data_length;
10536         }
10537
10538         if (!size)
10539                 return;
10540
10541         /* Register hwmon sysfs hooks */
10542         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10543         if (err) {
10544                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10545                 return;
10546         }
10547
10548         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10549         if (IS_ERR(tp->hwmon_dev)) {
10550                 tp->hwmon_dev = NULL;
10551                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10552                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10553         }
10554 }
10555
10556
10557 #define TG3_STAT_ADD32(PSTAT, REG) \
10558 do {    u32 __val = tr32(REG); \
10559         (PSTAT)->low += __val; \
10560         if ((PSTAT)->low < __val) \
10561                 (PSTAT)->high += 1; \
10562 } while (0)
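/* Worked example of the carry test in TG3_STAT_ADD32 (illustrative
 * numbers): if (PSTAT)->low is 0xfffffff0 and the register reads 0x20,
 * the 32-bit addition wraps low to 0x10; since 0x10 < 0x20 the wrap is
 * detected and high is incremented, so the logical 64-bit counter
 *
 *      ((u64)(PSTAT)->high << 32) | (PSTAT)->low
 *
 * still advances by exactly 0x20.
 */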
10563
10564 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10565 {
10566         struct tg3_hw_stats *sp = tp->hw_stats;
10567
10568         if (!tp->link_up)
10569                 return;
10570
10571         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10572         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10573         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10574         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10575         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10576         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10577         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10578         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10579         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10580         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10581         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10582         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10583         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10584         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10585                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10586                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10587                 u32 val;
10588
10589                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10590                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10591                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10592                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10593         }
10594
10595         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10596         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10597         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10598         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10599         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10600         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10601         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10602         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10603         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10604         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10605         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10606         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10607         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10608         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10609
10610         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10611         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10612             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10613             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10614                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10615         } else {
10616                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10617                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10618                 if (val) {
10619                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10620                         sp->rx_discards.low += val;
10621                         if (sp->rx_discards.low < val)
10622                                 sp->rx_discards.high += 1;
10623                 }
10624                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10625         }
10626         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10627 }
10628
10629 static void tg3_chk_missed_msi(struct tg3 *tp)
10630 {
10631         u32 i;
10632
10633         for (i = 0; i < tp->irq_cnt; i++) {
10634                 struct tg3_napi *tnapi = &tp->napi[i];
10635
10636                 if (tg3_has_work(tnapi)) {
10637                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10638                             tnapi->last_tx_cons == tnapi->tx_cons) {
10639                                 if (tnapi->chk_msi_cnt < 1) {
10640                                         tnapi->chk_msi_cnt++;
10641                                         return;
10642                                 }
10643                                 tg3_msi(0, tnapi);
10644                         }
10645                 }
10646                 tnapi->chk_msi_cnt = 0;
10647                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10648                 tnapi->last_tx_cons = tnapi->tx_cons;
10649         }
10650 }
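/* Rationale for the check above: if a vector has pending work but its
 * rx/tx consumer indices have not moved since the previous timer tick,
 * the MSI was probably lost.  One grace tick is allowed via chk_msi_cnt,
 * after which tg3_msi() is invoked by hand to kick the NAPI path.
 */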
10651
10652 static void tg3_timer(unsigned long __opaque)
10653 {
10654         struct tg3 *tp = (struct tg3 *) __opaque;
10655
10656         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10657                 goto restart_timer;
10658
10659         spin_lock(&tp->lock);
10660
10661         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10662             tg3_flag(tp, 57765_CLASS))
10663                 tg3_chk_missed_msi(tp);
10664
10665         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10666                 /* BCM4785: Flush posted writes from GbE to host memory. */
10667                 tr32(HOSTCC_MODE);
10668         }
10669
10670         if (!tg3_flag(tp, TAGGED_STATUS)) {
10671                 /* All of this garbage exists because, when using non-tagged
10672                  * IRQ status, the mailbox/status_block protocol the chip
10673                  * uses with the CPU is race prone.
10674                  */
10675                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10676                         tw32(GRC_LOCAL_CTRL,
10677                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10678                 } else {
10679                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10680                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10681                 }
10682
10683                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10684                         spin_unlock(&tp->lock);
10685                         tg3_reset_task_schedule(tp);
10686                         goto restart_timer;
10687                 }
10688         }
10689
10690         /* This part only runs once per second. */
10691         if (!--tp->timer_counter) {
10692                 if (tg3_flag(tp, 5705_PLUS))
10693                         tg3_periodic_fetch_stats(tp);
10694
10695                 if (tp->setlpicnt && !--tp->setlpicnt)
10696                         tg3_phy_eee_enable(tp);
10697
10698                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10699                         u32 mac_stat;
10700                         int phy_event;
10701
10702                         mac_stat = tr32(MAC_STATUS);
10703
10704                         phy_event = 0;
10705                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10706                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10707                                         phy_event = 1;
10708                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10709                                 phy_event = 1;
10710
10711                         if (phy_event)
10712                                 tg3_setup_phy(tp, false);
10713                 } else if (tg3_flag(tp, POLL_SERDES)) {
10714                         u32 mac_stat = tr32(MAC_STATUS);
10715                         int need_setup = 0;
10716
10717                         if (tp->link_up &&
10718                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10719                                 need_setup = 1;
10720                         }
10721                         if (!tp->link_up &&
10722                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10723                                          MAC_STATUS_SIGNAL_DET))) {
10724                                 need_setup = 1;
10725                         }
10726                         if (need_setup) {
10727                                 if (!tp->serdes_counter) {
10728                                         tw32_f(MAC_MODE,
10729                                              (tp->mac_mode &
10730                                               ~MAC_MODE_PORT_MODE_MASK));
10731                                         udelay(40);
10732                                         tw32_f(MAC_MODE, tp->mac_mode);
10733                                         udelay(40);
10734                                 }
10735                                 tg3_setup_phy(tp, false);
10736                         }
10737                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10738                            tg3_flag(tp, 5780_CLASS)) {
10739                         tg3_serdes_parallel_detect(tp);
10740                 }
10741
10742                 tp->timer_counter = tp->timer_multiplier;
10743         }
10744
10745         /* Heartbeat is only sent once every 2 seconds.
10746          *
10747          * The heartbeat is to tell the ASF firmware that the host
10748          * driver is still alive.  In the event that the OS crashes,
10749          * ASF needs to reset the hardware to free up the FIFO space
10750          * that may be filled with rx packets destined for the host.
10751          * If the FIFO is full, ASF will no longer function properly.
10752          *
10753          * Unintended resets have been reported on real time kernels
10754          * where the timer doesn't run on time.  Netpoll will also have
10755          * the same problem.
10756          *
10757          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10758          * to check the ring condition when the heartbeat is expiring
10759          * before doing the reset.  This will prevent most unintended
10760          * resets.
10761          */
10762         if (!--tp->asf_counter) {
10763                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10764                         tg3_wait_for_event_ack(tp);
10765
10766                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10767                                       FWCMD_NICDRV_ALIVE3);
10768                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10769                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10770                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10771
10772                         tg3_generate_fw_event(tp);
10773                 }
10774                 tp->asf_counter = tp->asf_multiplier;
10775         }
10776
10777         spin_unlock(&tp->lock);
10778
10779 restart_timer:
10780         tp->timer.expires = jiffies + tp->timer_offset;
10781         add_timer(&tp->timer);
10782 }
10783
10784 static void tg3_timer_init(struct tg3 *tp)
10785 {
10786         if (tg3_flag(tp, TAGGED_STATUS) &&
10787             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10788             !tg3_flag(tp, 57765_CLASS))
10789                 tp->timer_offset = HZ;
10790         else
10791                 tp->timer_offset = HZ / 10;
10792
10793         BUG_ON(tp->timer_offset > HZ);
10794
10795         tp->timer_multiplier = (HZ / tp->timer_offset);
10796         tp->asf_multiplier = (HZ / tp->timer_offset) *
10797                              TG3_FW_UPDATE_FREQ_SEC;
10798
10799         init_timer(&tp->timer);
10800         tp->timer.data = (unsigned long) tp;
10801         tp->timer.function = tg3_timer;
10802 }
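/* Tick accounting, by way of example (HZ value illustrative): with
 * HZ == 1000 and timer_offset == HZ / 10, the timer fires ten times a
 * second and timer_multiplier is 10, so the once-per-second block in
 * tg3_timer() still runs at one-second intervals; asf_multiplier scales
 * the same tick rate by TG3_FW_UPDATE_FREQ_SEC for the ASF heartbeat.
 */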
10803
10804 static void tg3_timer_start(struct tg3 *tp)
10805 {
10806         tp->asf_counter   = tp->asf_multiplier;
10807         tp->timer_counter = tp->timer_multiplier;
10808
10809         tp->timer.expires = jiffies + tp->timer_offset;
10810         add_timer(&tp->timer);
10811 }
10812
10813 static void tg3_timer_stop(struct tg3 *tp)
10814 {
10815         del_timer_sync(&tp->timer);
10816 }
10817
10818 /* Restart hardware after configuration changes, self-test, etc.
10819  * Invoked with tp->lock held.
10820  */
10821 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10822         __releases(tp->lock)
10823         __acquires(tp->lock)
10824 {
10825         int err;
10826
10827         err = tg3_init_hw(tp, reset_phy);
10828         if (err) {
10829                 netdev_err(tp->dev,
10830                            "Failed to re-initialize device, aborting\n");
10831                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10832                 tg3_full_unlock(tp);
10833                 tg3_timer_stop(tp);
10834                 tp->irq_sync = 0;
10835                 tg3_napi_enable(tp);
10836                 dev_close(tp->dev);
10837                 tg3_full_lock(tp, 0);
10838         }
10839         return err;
10840 }
10841
10842 static void tg3_reset_task(struct work_struct *work)
10843 {
10844         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10845         int err;
10846
10847         tg3_full_lock(tp, 0);
10848
10849         if (!netif_running(tp->dev)) {
10850                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10851                 tg3_full_unlock(tp);
10852                 return;
10853         }
10854
10855         tg3_full_unlock(tp);
10856
10857         tg3_phy_stop(tp);
10858
10859         tg3_netif_stop(tp);
10860
10861         tg3_full_lock(tp, 1);
10862
10863         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10864                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10865                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10866                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10867                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10868         }
10869
10870         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10871         err = tg3_init_hw(tp, true);
10872         if (err)
10873                 goto out;
10874
10875         tg3_netif_start(tp);
10876
10877 out:
10878         tg3_full_unlock(tp);
10879
10880         if (!err)
10881                 tg3_phy_start(tp);
10882
10883         tg3_flag_clear(tp, RESET_TASK_PENDING);
10884 }
10885
10886 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10887 {
10888         irq_handler_t fn;
10889         unsigned long flags;
10890         char *name;
10891         struct tg3_napi *tnapi = &tp->napi[irq_num];
10892
10893         if (tp->irq_cnt == 1)
10894                 name = tp->dev->name;
10895         else {
10896                 name = &tnapi->irq_lbl[0];
10897                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10898                 name[IFNAMSIZ-1] = 0;
10899         }
10900
10901         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10902                 fn = tg3_msi;
10903                 if (tg3_flag(tp, 1SHOT_MSI))
10904                         fn = tg3_msi_1shot;
10905                 flags = 0;
10906         } else {
10907                 fn = tg3_interrupt;
10908                 if (tg3_flag(tp, TAGGED_STATUS))
10909                         fn = tg3_interrupt_tagged;
10910                 flags = IRQF_SHARED;
10911         }
10912
10913         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10914 }
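/* Handler selection above, summarized (illustrative):
 *   MSI/MSI-X with 1SHOT_MSI -> tg3_msi_1shot, exclusive IRQ
 *   MSI/MSI-X otherwise      -> tg3_msi,       exclusive IRQ
 *   INTx with TAGGED_STATUS  -> tg3_interrupt_tagged, IRQF_SHARED
 *   INTx otherwise           -> tg3_interrupt,        IRQF_SHARED
 */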
10915
10916 static int tg3_test_interrupt(struct tg3 *tp)
10917 {
10918         struct tg3_napi *tnapi = &tp->napi[0];
10919         struct net_device *dev = tp->dev;
10920         int err, i, intr_ok = 0;
10921         u32 val;
10922
10923         if (!netif_running(dev))
10924                 return -ENODEV;
10925
10926         tg3_disable_ints(tp);
10927
10928         free_irq(tnapi->irq_vec, tnapi);
10929
10930         /*
10931          * Turn off MSI one shot mode.  Otherwise this test has no
10932          * observable way to know whether the interrupt was delivered.
10933          */
10934         if (tg3_flag(tp, 57765_PLUS)) {
10935                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10936                 tw32(MSGINT_MODE, val);
10937         }
10938
10939         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10940                           IRQF_SHARED, dev->name, tnapi);
10941         if (err)
10942                 return err;
10943
10944         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10945         tg3_enable_ints(tp);
10946
10947         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10948                tnapi->coal_now);
10949
10950         for (i = 0; i < 5; i++) {
10951                 u32 int_mbox, misc_host_ctrl;
10952
10953                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10954                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10955
10956                 if ((int_mbox != 0) ||
10957                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10958                         intr_ok = 1;
10959                         break;
10960                 }
10961
10962                 if (tg3_flag(tp, 57765_PLUS) &&
10963                     tnapi->hw_status->status_tag != tnapi->last_tag)
10964                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10965
10966                 msleep(10);
10967         }
10968
10969         tg3_disable_ints(tp);
10970
10971         free_irq(tnapi->irq_vec, tnapi);
10972
10973         err = tg3_request_irq(tp, 0);
10974
10975         if (err)
10976                 return err;
10977
10978         if (intr_ok) {
10979                 /* Reenable MSI one shot mode. */
10980                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10981                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10982                         tw32(MSGINT_MODE, val);
10983                 }
10984                 return 0;
10985         }
10986
10987         return -EIO;
10988 }
10989
10990 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10991  * INTx mode is successfully restored.
10992  */
10993 static int tg3_test_msi(struct tg3 *tp)
10994 {
10995         int err;
10996         u16 pci_cmd;
10997
10998         if (!tg3_flag(tp, USING_MSI))
10999                 return 0;
11000
11001         /* Turn off SERR reporting in case MSI terminates with Master
11002          * Abort.
11003          */
11004         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11005         pci_write_config_word(tp->pdev, PCI_COMMAND,
11006                               pci_cmd & ~PCI_COMMAND_SERR);
11007
11008         err = tg3_test_interrupt(tp);
11009
11010         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11011
11012         if (!err)
11013                 return 0;
11014
11015         /* other failures */
11016         if (err != -EIO)
11017                 return err;
11018
11019         /* MSI test failed, go back to INTx mode */
11020         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11021                     "to INTx mode. Please report this failure to the PCI "
11022                     "maintainer and include system chipset information\n");
11023
11024         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11025
11026         pci_disable_msi(tp->pdev);
11027
11028         tg3_flag_clear(tp, USING_MSI);
11029         tp->napi[0].irq_vec = tp->pdev->irq;
11030
11031         err = tg3_request_irq(tp, 0);
11032         if (err)
11033                 return err;
11034
11035         /* Need to reset the chip because the MSI cycle may have terminated
11036          * with Master Abort.
11037          */
11038         tg3_full_lock(tp, 1);
11039
11040         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11041         err = tg3_init_hw(tp, true);
11042
11043         tg3_full_unlock(tp);
11044
11045         if (err)
11046                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11047
11048         return err;
11049 }
11050
11051 static int tg3_request_firmware(struct tg3 *tp)
11052 {
11053         const struct tg3_firmware_hdr *fw_hdr;
11054
11055         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11056                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11057                            tp->fw_needed);
11058                 return -ENOENT;
11059         }
11060
11061         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11062
11063         /* Firmware blob starts with version numbers, followed by
11064          * start address and _full_ length including BSS sections
11065          * (which must be longer than the actual data, of course).
11066          */
11067
11068         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11069         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11070                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11071                            tp->fw_len, tp->fw_needed);
11072                 release_firmware(tp->fw);
11073                 tp->fw = NULL;
11074                 return -EINVAL;
11075         }
11076
11077         /* We no longer need firmware; we have it. */
11078         tp->fw_needed = NULL;
11079         return 0;
11080 }
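/* Sketch of the length check above (illustrative numbers): for a blob of
 * S bytes on disk with a TG3_FW_HDR_LEN-byte header, S - TG3_FW_HDR_LEN
 * bytes of program data are actually present, so a header advertising
 * fw_len smaller than that cannot be the "full length including BSS" and
 * the blob is rejected as bogus.
 */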
11081
11082 static u32 tg3_irq_count(struct tg3 *tp)
11083 {
11084         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11085
11086         if (irq_cnt > 1) {
11087                 /* We want as many rx rings enabled as there are cpus.
11088                  * In multiqueue MSI-X mode, the first MSI-X vector
11089                  * only deals with link interrupts, etc, so we add
11090                  * one to the number of vectors we are requesting.
11091                  */
11092                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11093         }
11094
11095         return irq_cnt;
11096 }
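/* Example (illustrative counts): with rxq_cnt == 4 and txq_cnt == 1 the
 * max is 4; since that exceeds 1, one extra vector is reserved for link
 * and status interrupts, so tg3_irq_count() requests min(5, irq_max).
 */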
11097
11098 static bool tg3_enable_msix(struct tg3 *tp)
11099 {
11100         int i, rc;
11101         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11102
11103         tp->txq_cnt = tp->txq_req;
11104         tp->rxq_cnt = tp->rxq_req;
11105         if (!tp->rxq_cnt)
11106                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11107         if (tp->rxq_cnt > tp->rxq_max)
11108                 tp->rxq_cnt = tp->rxq_max;
11109
11110         /* Disable multiple TX rings by default.  Simple round-robin hardware
11111          * scheduling of the TX rings can cause starvation of rings with
11112          * small packets when other rings have TSO or jumbo packets.
11113          */
11114         if (!tp->txq_req)
11115                 tp->txq_cnt = 1;
11116
11117         tp->irq_cnt = tg3_irq_count(tp);
11118
11119         for (i = 0; i < tp->irq_max; i++) {
11120                 msix_ent[i].entry  = i;
11121                 msix_ent[i].vector = 0;
11122         }
11123
11124         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11125         if (rc < 0) {
11126                 return false;
11127         } else if (rc != 0) {
11128                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11129                         return false;
11130                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11131                               tp->irq_cnt, rc);
11132                 tp->irq_cnt = rc;
11133                 tp->rxq_cnt = max(rc - 1, 1);
11134                 if (tp->txq_cnt)
11135                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11136         }
11137
11138         for (i = 0; i < tp->irq_max; i++)
11139                 tp->napi[i].irq_vec = msix_ent[i].vector;
11140
11141         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11142                 pci_disable_msix(tp->pdev);
11143                 return false;
11144         }
11145
11146         if (tp->irq_cnt == 1)
11147                 return true;
11148
11149         tg3_flag_set(tp, ENABLE_RSS);
11150
11151         if (tp->txq_cnt > 1)
11152                 tg3_flag_set(tp, ENABLE_TSS);
11153
11154         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11155
11156         return true;
11157 }
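/* Example of the fallback above (illustrative): requesting 5 vectors on
 * a system that can grant only 3 makes pci_enable_msix() return 3; the
 * retry then asks for exactly 3, leaving vector 0 for link events and
 * rc - 1 == 2 rx queues.
 */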
11158
11159 static void tg3_ints_init(struct tg3 *tp)
11160 {
11161         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11162             !tg3_flag(tp, TAGGED_STATUS)) {
11163                 /* All MSI-supporting chips should support tagged
11164                  * status.  Assert that this is the case.
11165                  */
11166                 netdev_warn(tp->dev,
11167                             "MSI without TAGGED_STATUS? Not using MSI\n");
11168                 goto defcfg;
11169         }
11170
11171         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11172                 tg3_flag_set(tp, USING_MSIX);
11173         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11174                 tg3_flag_set(tp, USING_MSI);
11175
11176         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11177                 u32 msi_mode = tr32(MSGINT_MODE);
11178                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11179                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11180                 if (!tg3_flag(tp, 1SHOT_MSI))
11181                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11182                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11183         }
11184 defcfg:
11185         if (!tg3_flag(tp, USING_MSIX)) {
11186                 tp->irq_cnt = 1;
11187                 tp->napi[0].irq_vec = tp->pdev->irq;
11188         }
11189
11190         if (tp->irq_cnt == 1) {
11191                 tp->txq_cnt = 1;
11192                 tp->rxq_cnt = 1;
11193                 netif_set_real_num_tx_queues(tp->dev, 1);
11194                 netif_set_real_num_rx_queues(tp->dev, 1);
11195         }
11196 }
11197
11198 static void tg3_ints_fini(struct tg3 *tp)
11199 {
11200         if (tg3_flag(tp, USING_MSIX))
11201                 pci_disable_msix(tp->pdev);
11202         else if (tg3_flag(tp, USING_MSI))
11203                 pci_disable_msi(tp->pdev);
11204         tg3_flag_clear(tp, USING_MSI);
11205         tg3_flag_clear(tp, USING_MSIX);
11206         tg3_flag_clear(tp, ENABLE_RSS);
11207         tg3_flag_clear(tp, ENABLE_TSS);
11208 }
11209
11210 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11211                      bool init)
11212 {
11213         struct net_device *dev = tp->dev;
11214         int i, err;
11215
11216         /*
11217          * Setup interrupts first so we know how
11218          * many NAPI resources to allocate
11219          */
11220         tg3_ints_init(tp);
11221
11222         tg3_rss_check_indir_tbl(tp);
11223
11224         /* The placement of this call is tied
11225          * to the setup and use of Host TX descriptors.
11226          */
11227         err = tg3_alloc_consistent(tp);
11228         if (err)
11229                 goto err_out1;
11230
11231         tg3_napi_init(tp);
11232
11233         tg3_napi_enable(tp);
11234
11235         for (i = 0; i < tp->irq_cnt; i++) {
11236                 struct tg3_napi *tnapi = &tp->napi[i];
11237                 err = tg3_request_irq(tp, i);
11238                 if (err) {
11239                         for (i--; i >= 0; i--) {
11240                                 tnapi = &tp->napi[i];
11241                                 free_irq(tnapi->irq_vec, tnapi);
11242                         }
11243                         goto err_out2;
11244                 }
11245         }
11246
11247         tg3_full_lock(tp, 0);
11248
11249         err = tg3_init_hw(tp, reset_phy);
11250         if (err) {
11251                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11252                 tg3_free_rings(tp);
11253         }
11254
11255         tg3_full_unlock(tp);
11256
11257         if (err)
11258                 goto err_out3;
11259
11260         if (test_irq && tg3_flag(tp, USING_MSI)) {
11261                 err = tg3_test_msi(tp);
11262
11263                 if (err) {
11264                         tg3_full_lock(tp, 0);
11265                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11266                         tg3_free_rings(tp);
11267                         tg3_full_unlock(tp);
11268
11269                         goto err_out2;
11270                 }
11271
11272                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11273                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11274
11275                         tw32(PCIE_TRANSACTION_CFG,
11276                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11277                 }
11278         }
11279
11280         tg3_phy_start(tp);
11281
11282         tg3_hwmon_open(tp);
11283
11284         tg3_full_lock(tp, 0);
11285
11286         tg3_timer_start(tp);
11287         tg3_flag_set(tp, INIT_COMPLETE);
11288         tg3_enable_ints(tp);
11289
11290         if (init)
11291                 tg3_ptp_init(tp);
11292         else
11293                 tg3_ptp_resume(tp);
11294
11296         tg3_full_unlock(tp);
11297
11298         netif_tx_start_all_queues(dev);
11299
11300         /*
11301          * Reset the loopback feature if it was turned on while the device
11302          * was down; make sure it is reinstated properly now.
11303          */
11304         if (dev->features & NETIF_F_LOOPBACK)
11305                 tg3_set_loopback(dev, dev->features);
11306
11307         return 0;
11308
11309 err_out3:
11310         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11311                 struct tg3_napi *tnapi = &tp->napi[i];
11312                 free_irq(tnapi->irq_vec, tnapi);
11313         }
11314
11315 err_out2:
11316         tg3_napi_disable(tp);
11317         tg3_napi_fini(tp);
11318         tg3_free_consistent(tp);
11319
11320 err_out1:
11321         tg3_ints_fini(tp);
11322
11323         return err;
11324 }
11325
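/* Tear down in roughly the reverse order of tg3_start(): stop the
 * timer and PHY, halt the chip under the full lock, then release the
 * IRQs, NAPI contexts and DMA memory.
 */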
11326 static void tg3_stop(struct tg3 *tp)
11327 {
11328         int i;
11329
11330         tg3_reset_task_cancel(tp);
11331         tg3_netif_stop(tp);
11332
11333         tg3_timer_stop(tp);
11334
11335         tg3_hwmon_close(tp);
11336
11337         tg3_phy_stop(tp);
11338
11339         tg3_full_lock(tp, 1);
11340
11341         tg3_disable_ints(tp);
11342
11343         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11344         tg3_free_rings(tp);
11345         tg3_flag_clear(tp, INIT_COMPLETE);
11346
11347         tg3_full_unlock(tp);
11348
11349         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11350                 struct tg3_napi *tnapi = &tp->napi[i];
11351                 free_irq(tnapi->irq_vec, tnapi);
11352         }
11353
11354         tg3_ints_fini(tp);
11355
11356         tg3_napi_fini(tp);
11357
11358         tg3_free_consistent(tp);
11359 }
11360
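/* ndo_open handler.  Firmware is mandatory only on 5701 A0 parts;
 * elsewhere a missing image just downgrades features (EEE on 57766,
 * TSO otherwise), and the capability flag is restored if the image
 * turns up on a later open.
 */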
11361 static int tg3_open(struct net_device *dev)
11362 {
11363         struct tg3 *tp = netdev_priv(dev);
11364         int err;
11365
11366         if (tp->fw_needed) {
11367                 err = tg3_request_firmware(tp);
11368                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11369                         if (err) {
11370                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11371                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11372                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11373                                 netdev_warn(tp->dev, "EEE capability restored\n");
11374                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11375                         }
11376                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11377                         if (err)
11378                                 return err;
11379                 } else if (err) {
11380                         netdev_warn(tp->dev, "TSO capability disabled\n");
11381                         tg3_flag_clear(tp, TSO_CAPABLE);
11382                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11383                         netdev_notice(tp->dev, "TSO capability restored\n");
11384                         tg3_flag_set(tp, TSO_CAPABLE);
11385                 }
11386         }
11387
11388         tg3_carrier_off(tp);
11389
11390         err = tg3_power_up(tp);
11391         if (err)
11392                 return err;
11393
11394         tg3_full_lock(tp, 0);
11395
11396         tg3_disable_ints(tp);
11397         tg3_flag_clear(tp, INIT_COMPLETE);
11398
11399         tg3_full_unlock(tp);
11400
11401         err = tg3_start(tp,
11402                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11403                         true, true);
11404         if (err) {
11405                 tg3_frob_aux_power(tp, false);
11406                 pci_set_power_state(tp->pdev, PCI_D3hot);
11407         }
11408
11409         if (tg3_flag(tp, PTP_CAPABLE)) {
11410                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11411                                                    &tp->pdev->dev);
11412                 if (IS_ERR(tp->ptp_clock))
11413                         tp->ptp_clock = NULL;
11414         }
11415
11416         return err;
11417 }
11418
11419 static int tg3_close(struct net_device *dev)
11420 {
11421         struct tg3 *tp = netdev_priv(dev);
11422
11423         tg3_ptp_fini(tp);
11424
11425         tg3_stop(tp);
11426
11427         /* Clear stats across close / open calls */
11428         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11429         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11430
11431         tg3_power_down(tp);
11432
11433         tg3_carrier_off(tp);
11434
11435         return 0;
11436 }
11437
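/* Hardware counters live in the DMA'd statistics block as {high,low}
 * 32-bit pairs; fold one pair into a host u64.
 */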
11438 static inline u64 get_stat64(tg3_stat64_t *val)
11439 {
11440         return ((u64)val->high << 32) | ((u64)val->low);
11441 }
11442
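/* On 5700/5701 copper devices the CRC error count must be fetched
 * from the PHY (enable it via MII_TG3_TEST1_CRC_EN, then read
 * MII_TG3_RXR_COUNTERS) and is accumulated in tp->phy_crc_errors;
 * every other chip reports it in the rx_fcs_errors statistic.
 */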
11443 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11444 {
11445         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11446
11447         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11448             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11449              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11450                 u32 val;
11451
11452                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11453                         tg3_writephy(tp, MII_TG3_TEST1,
11454                                      val | MII_TG3_TEST1_CRC_EN);
11455                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11456                 } else
11457                         val = 0;
11458
11459                 tp->phy_crc_errors += val;
11460
11461                 return tp->phy_crc_errors;
11462         }
11463
11464         return get_stat64(&hw_stats->rx_fcs_errors);
11465 }
11466
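/* The statistics block is cleared whenever the chip is halted, so the
 * totals saved at the last halt (estats_prev) are added back to the
 * live hardware counters.  tg3_close() zeroes the saved totals, which
 * is why the counts restart across a close/open cycle.
 */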
11467 #define ESTAT_ADD(member) \
11468         estats->member =        old_estats->member + \
11469                                 get_stat64(&hw_stats->member)
11470
11471 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11472 {
11473         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11474         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11475
11476         ESTAT_ADD(rx_octets);
11477         ESTAT_ADD(rx_fragments);
11478         ESTAT_ADD(rx_ucast_packets);
11479         ESTAT_ADD(rx_mcast_packets);
11480         ESTAT_ADD(rx_bcast_packets);
11481         ESTAT_ADD(rx_fcs_errors);
11482         ESTAT_ADD(rx_align_errors);
11483         ESTAT_ADD(rx_xon_pause_rcvd);
11484         ESTAT_ADD(rx_xoff_pause_rcvd);
11485         ESTAT_ADD(rx_mac_ctrl_rcvd);
11486         ESTAT_ADD(rx_xoff_entered);
11487         ESTAT_ADD(rx_frame_too_long_errors);
11488         ESTAT_ADD(rx_jabbers);
11489         ESTAT_ADD(rx_undersize_packets);
11490         ESTAT_ADD(rx_in_length_errors);
11491         ESTAT_ADD(rx_out_length_errors);
11492         ESTAT_ADD(rx_64_or_less_octet_packets);
11493         ESTAT_ADD(rx_65_to_127_octet_packets);
11494         ESTAT_ADD(rx_128_to_255_octet_packets);
11495         ESTAT_ADD(rx_256_to_511_octet_packets);
11496         ESTAT_ADD(rx_512_to_1023_octet_packets);
11497         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11498         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11499         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11500         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11501         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11502
11503         ESTAT_ADD(tx_octets);
11504         ESTAT_ADD(tx_collisions);
11505         ESTAT_ADD(tx_xon_sent);
11506         ESTAT_ADD(tx_xoff_sent);
11507         ESTAT_ADD(tx_flow_control);
11508         ESTAT_ADD(tx_mac_errors);
11509         ESTAT_ADD(tx_single_collisions);
11510         ESTAT_ADD(tx_mult_collisions);
11511         ESTAT_ADD(tx_deferred);
11512         ESTAT_ADD(tx_excessive_collisions);
11513         ESTAT_ADD(tx_late_collisions);
11514         ESTAT_ADD(tx_collide_2times);
11515         ESTAT_ADD(tx_collide_3times);
11516         ESTAT_ADD(tx_collide_4times);
11517         ESTAT_ADD(tx_collide_5times);
11518         ESTAT_ADD(tx_collide_6times);
11519         ESTAT_ADD(tx_collide_7times);
11520         ESTAT_ADD(tx_collide_8times);
11521         ESTAT_ADD(tx_collide_9times);
11522         ESTAT_ADD(tx_collide_10times);
11523         ESTAT_ADD(tx_collide_11times);
11524         ESTAT_ADD(tx_collide_12times);
11525         ESTAT_ADD(tx_collide_13times);
11526         ESTAT_ADD(tx_collide_14times);
11527         ESTAT_ADD(tx_collide_15times);
11528         ESTAT_ADD(tx_ucast_packets);
11529         ESTAT_ADD(tx_mcast_packets);
11530         ESTAT_ADD(tx_bcast_packets);
11531         ESTAT_ADD(tx_carrier_sense_errors);
11532         ESTAT_ADD(tx_discards);
11533         ESTAT_ADD(tx_errors);
11534
11535         ESTAT_ADD(dma_writeq_full);
11536         ESTAT_ADD(dma_write_prioq_full);
11537         ESTAT_ADD(rxbds_empty);
11538         ESTAT_ADD(rx_discards);
11539         ESTAT_ADD(rx_errors);
11540         ESTAT_ADD(rx_threshold_hit);
11541
11542         ESTAT_ADD(dma_readq_full);
11543         ESTAT_ADD(dma_read_prioq_full);
11544         ESTAT_ADD(tx_comp_queue_full);
11545
11546         ESTAT_ADD(ring_set_send_prod_index);
11547         ESTAT_ADD(ring_status_update);
11548         ESTAT_ADD(nic_irqs);
11549         ESTAT_ADD(nic_avoided_irqs);
11550         ESTAT_ADD(nic_tx_threshold_hit);
11551
11552         ESTAT_ADD(mbuf_lwm_thresh_hit);
11553 }
11554
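/* Fill rtnl_link_stats64 from the hardware statistics block plus the
 * totals saved at the last chip halt.  rx_dropped/tx_dropped are pure
 * software counters maintained by the driver.
 */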
11555 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11556 {
11557         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11558         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11559
11560         stats->rx_packets = old_stats->rx_packets +
11561                 get_stat64(&hw_stats->rx_ucast_packets) +
11562                 get_stat64(&hw_stats->rx_mcast_packets) +
11563                 get_stat64(&hw_stats->rx_bcast_packets);
11564
11565         stats->tx_packets = old_stats->tx_packets +
11566                 get_stat64(&hw_stats->tx_ucast_packets) +
11567                 get_stat64(&hw_stats->tx_mcast_packets) +
11568                 get_stat64(&hw_stats->tx_bcast_packets);
11569
11570         stats->rx_bytes = old_stats->rx_bytes +
11571                 get_stat64(&hw_stats->rx_octets);
11572         stats->tx_bytes = old_stats->tx_bytes +
11573                 get_stat64(&hw_stats->tx_octets);
11574
11575         stats->rx_errors = old_stats->rx_errors +
11576                 get_stat64(&hw_stats->rx_errors);
11577         stats->tx_errors = old_stats->tx_errors +
11578                 get_stat64(&hw_stats->tx_errors) +
11579                 get_stat64(&hw_stats->tx_mac_errors) +
11580                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11581                 get_stat64(&hw_stats->tx_discards);
11582
11583         stats->multicast = old_stats->multicast +
11584                 get_stat64(&hw_stats->rx_mcast_packets);
11585         stats->collisions = old_stats->collisions +
11586                 get_stat64(&hw_stats->tx_collisions);
11587
11588         stats->rx_length_errors = old_stats->rx_length_errors +
11589                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11590                 get_stat64(&hw_stats->rx_undersize_packets);
11591
11592         stats->rx_over_errors = old_stats->rx_over_errors +
11593                 get_stat64(&hw_stats->rxbds_empty);
11594         stats->rx_frame_errors = old_stats->rx_frame_errors +
11595                 get_stat64(&hw_stats->rx_align_errors);
11596         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11597                 get_stat64(&hw_stats->tx_discards);
11598         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11599                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11600
11601         stats->rx_crc_errors = old_stats->rx_crc_errors +
11602                 tg3_calc_crc_errors(tp);
11603
11604         stats->rx_missed_errors = old_stats->rx_missed_errors +
11605                 get_stat64(&hw_stats->rx_discards);
11606
11607         stats->rx_dropped = tp->rx_dropped;
11608         stats->tx_dropped = tp->tx_dropped;
11609 }
11610
11611 static int tg3_get_regs_len(struct net_device *dev)
11612 {
11613         return TG3_REG_BLK_SIZE;
11614 }
11615
11616 static void tg3_get_regs(struct net_device *dev,
11617                 struct ethtool_regs *regs, void *_p)
11618 {
11619         struct tg3 *tp = netdev_priv(dev);
11620
11621         regs->version = 0;
11622
11623         memset(_p, 0, TG3_REG_BLK_SIZE);
11624
11625         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11626                 return;
11627
11628         tg3_full_lock(tp, 0);
11629
11630         tg3_dump_legacy_regs(tp, (u32 *)_p);
11631
11632         tg3_full_unlock(tp);
11633 }
11634
11635 static int tg3_get_eeprom_len(struct net_device *dev)
11636 {
11637         struct tg3 *tp = netdev_priv(dev);
11638
11639         return tp->nvram_size;
11640 }
11641
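/* ethtool -e: NVRAM is readable only in aligned 32-bit words, so a
 * request is split into up to three phases: a leading partial word, a
 * run of whole words, and a trailing partial word.  A request with
 * offset=1 len=2, for instance, is served entirely from the word at
 * offset 0.
 */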
11642 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11643 {
11644         struct tg3 *tp = netdev_priv(dev);
11645         int ret;
11646         u8  *pd;
11647         u32 i, offset, len, b_offset, b_count;
11648         __be32 val;
11649
11650         if (tg3_flag(tp, NO_NVRAM))
11651                 return -EINVAL;
11652
11653         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11654                 return -EAGAIN;
11655
11656         offset = eeprom->offset;
11657         len = eeprom->len;
11658         eeprom->len = 0;
11659
11660         eeprom->magic = TG3_EEPROM_MAGIC;
11661
11662         if (offset & 3) {
11663                 /* adjust to start on the required 4-byte boundary */
11664                 b_offset = offset & 3;
11665                 b_count = 4 - b_offset;
11666                 if (b_count > len) {
11667                         /* i.e. offset=1 len=2 */
11668                         b_count = len;
11669                 }
11670                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11671                 if (ret)
11672                         return ret;
11673                 memcpy(data, ((char *)&val) + b_offset, b_count);
11674                 len -= b_count;
11675                 offset += b_count;
11676                 eeprom->len += b_count;
11677         }
11678
11679         /* read bytes up to the last 4 byte boundary */
11680         pd = &data[eeprom->len];
11681         for (i = 0; i < (len - (len & 3)); i += 4) {
11682                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11683                 if (ret) {
11684                         eeprom->len += i;
11685                         return ret;
11686                 }
11687                 memcpy(pd + i, &val, 4);
11688         }
11689         eeprom->len += i;
11690
11691         if (len & 3) {
11692                 /* read the trailing bytes that don't end on a 4-byte boundary */
11693                 pd = &data[eeprom->len];
11694                 b_count = len & 3;
11695                 b_offset = offset + len - b_count;
11696                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11697                 if (ret)
11698                         return ret;
11699                 memcpy(pd, &val, b_count);
11700                 eeprom->len += b_count;
11701         }
11702         return 0;
11703 }
11704
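/* ethtool -E: writes are word based as well.  Unaligned head/tail
 * bytes are handled by reading the bordering words and merging the
 * user data into a bounce buffer before tg3_nvram_write_block().
 */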
11705 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11706 {
11707         struct tg3 *tp = netdev_priv(dev);
11708         int ret;
11709         u32 offset, len, b_offset, odd_len;
11710         u8 *buf;
11711         __be32 start, end;
11712
11713         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11714                 return -EAGAIN;
11715
11716         if (tg3_flag(tp, NO_NVRAM) ||
11717             eeprom->magic != TG3_EEPROM_MAGIC)
11718                 return -EINVAL;
11719
11720         offset = eeprom->offset;
11721         len = eeprom->len;
11722
11723         if ((b_offset = (offset & 3))) {
11724                 /* adjust to start on the required 4-byte boundary */
11725                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11726                 if (ret)
11727                         return ret;
11728                 len += b_offset;
11729                 offset &= ~3;
11730                 if (len < 4)
11731                         len = 4;
11732         }
11733
11734         odd_len = 0;
11735         if (len & 3) {
11736                 /* adjust to end on the required 4-byte boundary */
11737                 odd_len = 1;
11738                 len = (len + 3) & ~3;
11739                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11740                 if (ret)
11741                         return ret;
11742         }
11743
11744         buf = data;
11745         if (b_offset || odd_len) {
11746                 buf = kmalloc(len, GFP_KERNEL);
11747                 if (!buf)
11748                         return -ENOMEM;
11749                 if (b_offset)
11750                         memcpy(buf, &start, 4);
11751                 if (odd_len)
11752                         memcpy(buf+len-4, &end, 4);
11753                 memcpy(buf + b_offset, data, eeprom->len);
11754         }
11755
11756         ret = tg3_nvram_write_block(tp, offset, len, buf);
11757
11758         if (buf != data)
11759                 kfree(buf);
11760
11761         return ret;
11762 }
11763
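/* With phylib the settings query is delegated to the PHY device;
 * otherwise the supported/advertised masks are synthesized from the
 * tg3 PHY flags and the cached link_config state.
 */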
11764 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11765 {
11766         struct tg3 *tp = netdev_priv(dev);
11767
11768         if (tg3_flag(tp, USE_PHYLIB)) {
11769                 struct phy_device *phydev;
11770                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11771                         return -EAGAIN;
11772                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11773                 return phy_ethtool_gset(phydev, cmd);
11774         }
11775
11776         cmd->supported = (SUPPORTED_Autoneg);
11777
11778         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11779                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11780                                    SUPPORTED_1000baseT_Full);
11781
11782         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11783                 cmd->supported |= (SUPPORTED_100baseT_Half |
11784                                    SUPPORTED_100baseT_Full |
11785                                    SUPPORTED_10baseT_Half |
11786                                    SUPPORTED_10baseT_Full |
11787                                    SUPPORTED_TP);
11788                 cmd->port = PORT_TP;
11789         } else {
11790                 cmd->supported |= SUPPORTED_FIBRE;
11791                 cmd->port = PORT_FIBRE;
11792         }
11793
11794         cmd->advertising = tp->link_config.advertising;
11795         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11796                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11797                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11798                                 cmd->advertising |= ADVERTISED_Pause;
11799                         } else {
11800                                 cmd->advertising |= ADVERTISED_Pause |
11801                                                     ADVERTISED_Asym_Pause;
11802                         }
11803                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11804                         cmd->advertising |= ADVERTISED_Asym_Pause;
11805                 }
11806         }
11807         if (netif_running(dev) && tp->link_up) {
11808                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11809                 cmd->duplex = tp->link_config.active_duplex;
11810                 cmd->lp_advertising = tp->link_config.rmt_adv;
11811                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11812                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11813                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11814                         else
11815                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11816                 }
11817         } else {
11818                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11819                 cmd->duplex = DUPLEX_UNKNOWN;
11820                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11821         }
11822         cmd->phy_address = tp->phy_addr;
11823         cmd->transceiver = XCVR_INTERNAL;
11824         cmd->autoneg = tp->link_config.autoneg;
11825         cmd->maxtxpkt = 0;
11826         cmd->maxrxpkt = 0;
11827         return 0;
11828 }
11829
11830 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11831 {
11832         struct tg3 *tp = netdev_priv(dev);
11833         u32 speed = ethtool_cmd_speed(cmd);
11834
11835         if (tg3_flag(tp, USE_PHYLIB)) {
11836                 struct phy_device *phydev;
11837                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11838                         return -EAGAIN;
11839                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11840                 return phy_ethtool_sset(phydev, cmd);
11841         }
11842
11843         if (cmd->autoneg != AUTONEG_ENABLE &&
11844             cmd->autoneg != AUTONEG_DISABLE)
11845                 return -EINVAL;
11846
11847         if (cmd->autoneg == AUTONEG_DISABLE &&
11848             cmd->duplex != DUPLEX_FULL &&
11849             cmd->duplex != DUPLEX_HALF)
11850                 return -EINVAL;
11851
11852         if (cmd->autoneg == AUTONEG_ENABLE) {
11853                 u32 mask = ADVERTISED_Autoneg |
11854                            ADVERTISED_Pause |
11855                            ADVERTISED_Asym_Pause;
11856
11857                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11858                         mask |= ADVERTISED_1000baseT_Half |
11859                                 ADVERTISED_1000baseT_Full;
11860
11861                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11862                         mask |= ADVERTISED_100baseT_Half |
11863                                 ADVERTISED_100baseT_Full |
11864                                 ADVERTISED_10baseT_Half |
11865                                 ADVERTISED_10baseT_Full |
11866                                 ADVERTISED_TP;
11867                 else
11868                         mask |= ADVERTISED_FIBRE;
11869
11870                 if (cmd->advertising & ~mask)
11871                         return -EINVAL;
11872
11873                 mask &= (ADVERTISED_1000baseT_Half |
11874                          ADVERTISED_1000baseT_Full |
11875                          ADVERTISED_100baseT_Half |
11876                          ADVERTISED_100baseT_Full |
11877                          ADVERTISED_10baseT_Half |
11878                          ADVERTISED_10baseT_Full);
11879
11880                 cmd->advertising &= mask;
11881         } else {
11882                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11883                         if (speed != SPEED_1000)
11884                                 return -EINVAL;
11885
11886                         if (cmd->duplex != DUPLEX_FULL)
11887                                 return -EINVAL;
11888                 } else {
11889                         if (speed != SPEED_100 &&
11890                             speed != SPEED_10)
11891                                 return -EINVAL;
11892                 }
11893         }
11894
11895         tg3_full_lock(tp, 0);
11896
11897         tp->link_config.autoneg = cmd->autoneg;
11898         if (cmd->autoneg == AUTONEG_ENABLE) {
11899                 tp->link_config.advertising = (cmd->advertising |
11900                                               ADVERTISED_Autoneg);
11901                 tp->link_config.speed = SPEED_UNKNOWN;
11902                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11903         } else {
11904                 tp->link_config.advertising = 0;
11905                 tp->link_config.speed = speed;
11906                 tp->link_config.duplex = cmd->duplex;
11907         }
11908
11909         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11910
11911         tg3_warn_mgmt_link_flap(tp);
11912
11913         if (netif_running(dev))
11914                 tg3_setup_phy(tp, true);
11915
11916         tg3_full_unlock(tp);
11917
11918         return 0;
11919 }
11920
11921 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11922 {
11923         struct tg3 *tp = netdev_priv(dev);
11924
11925         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11926         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11927         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11928         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11929 }
11930
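/* Wake-on-LAN: only magic-packet wake is supported, and only when
 * both the WOL_CAP flag and PCI-level wakeup capability are present.
 */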
11931 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11932 {
11933         struct tg3 *tp = netdev_priv(dev);
11934
11935         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11936                 wol->supported = WAKE_MAGIC;
11937         else
11938                 wol->supported = 0;
11939         wol->wolopts = 0;
11940         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11941                 wol->wolopts = WAKE_MAGIC;
11942         memset(&wol->sopass, 0, sizeof(wol->sopass));
11943 }
11944
11945 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11946 {
11947         struct tg3 *tp = netdev_priv(dev);
11948         struct device *dp = &tp->pdev->dev;
11949
11950         if (wol->wolopts & ~WAKE_MAGIC)
11951                 return -EINVAL;
11952         if ((wol->wolopts & WAKE_MAGIC) &&
11953             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11954                 return -EINVAL;
11955
11956         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11957
11958         spin_lock_bh(&tp->lock);
11959         if (device_may_wakeup(dp))
11960                 tg3_flag_set(tp, WOL_ENABLE);
11961         else
11962                 tg3_flag_clear(tp, WOL_ENABLE);
11963         spin_unlock_bh(&tp->lock);
11964
11965         return 0;
11966 }
11967
11968 static u32 tg3_get_msglevel(struct net_device *dev)
11969 {
11970         struct tg3 *tp = netdev_priv(dev);
11971         return tp->msg_enable;
11972 }
11973
11974 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11975 {
11976         struct tg3 *tp = netdev_priv(dev);
11977         tp->msg_enable = value;
11978 }
11979
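/* ethtool -r: restart autonegotiation.  Rejected for serdes PHYs; on
 * the non-phylib path the restart is only attempted if autoneg is
 * already enabled in BMCR or the link came up via parallel detect.
 */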
11980 static int tg3_nway_reset(struct net_device *dev)
11981 {
11982         struct tg3 *tp = netdev_priv(dev);
11983         int r;
11984
11985         if (!netif_running(dev))
11986                 return -EAGAIN;
11987
11988         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11989                 return -EINVAL;
11990
11991         tg3_warn_mgmt_link_flap(tp);
11992
11993         if (tg3_flag(tp, USE_PHYLIB)) {
11994                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11995                         return -EAGAIN;
11996                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11997         } else {
11998                 u32 bmcr;
11999
12000                 spin_lock_bh(&tp->lock);
12001                 r = -EINVAL;
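                /* BMCR is read twice below; the first read appears to
                 * be a deliberate dummy read to flush stale data (a
                 * long-standing pattern in this driver), the second
                 * supplies the value actually tested.
                 */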
12002                 tg3_readphy(tp, MII_BMCR, &bmcr);
12003                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12004                     ((bmcr & BMCR_ANENABLE) ||
12005                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12006                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12007                                                    BMCR_ANENABLE);
12008                         r = 0;
12009                 }
12010                 spin_unlock_bh(&tp->lock);
12011         }
12012
12013         return r;
12014 }
12015
12016 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12017 {
12018         struct tg3 *tp = netdev_priv(dev);
12019
12020         ering->rx_max_pending = tp->rx_std_ring_mask;
12021         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12022                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12023         else
12024                 ering->rx_jumbo_max_pending = 0;
12025
12026         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12027
12028         ering->rx_pending = tp->rx_pending;
12029         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12030                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12031         else
12032                 ering->rx_jumbo_pending = 0;
12033
12034         ering->tx_pending = tp->napi[0].tx_pending;
12035 }
12036
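/* ethtool -G: the TX ring must keep room for a maximally fragmented
 * skb (MAX_SKB_FRAGS slots, or three times that on TSO_BUG chips), so
 * undersized requests are rejected up front.  Applying new ring sizes
 * requires a full chip halt and restart.
 */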
12037 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12038 {
12039         struct tg3 *tp = netdev_priv(dev);
12040         int i, irq_sync = 0, err = 0;
12041
12042         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12043             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12044             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12045             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12046             (tg3_flag(tp, TSO_BUG) &&
12047              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12048                 return -EINVAL;
12049
12050         if (netif_running(dev)) {
12051                 tg3_phy_stop(tp);
12052                 tg3_netif_stop(tp);
12053                 irq_sync = 1;
12054         }
12055
12056         tg3_full_lock(tp, irq_sync);
12057
12058         tp->rx_pending = ering->rx_pending;
12059
12060         if (tg3_flag(tp, MAX_RXPEND_64) &&
12061             tp->rx_pending > 63)
12062                 tp->rx_pending = 63;
12063         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12064
12065         for (i = 0; i < tp->irq_max; i++)
12066                 tp->napi[i].tx_pending = ering->tx_pending;
12067
12068         if (netif_running(dev)) {
12069                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12070                 err = tg3_restart_hw(tp, false);
12071                 if (!err)
12072                         tg3_netif_start(tp);
12073         }
12074
12075         tg3_full_unlock(tp);
12076
12077         if (irq_sync && !err)
12078                 tg3_phy_start(tp);
12079
12080         return err;
12081 }
12082
12083 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12084 {
12085         struct tg3 *tp = netdev_priv(dev);
12086
12087         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12088
12089         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12090                 epause->rx_pause = 1;
12091         else
12092                 epause->rx_pause = 0;
12093
12094         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12095                 epause->tx_pause = 1;
12096         else
12097                 epause->tx_pause = 0;
12098 }
12099
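/* ethtool -A: with phylib the new pause settings are folded into the
 * PHY advertisement mask and a renegotiation is kicked off; on the
 * native path the chip is halted and reprogrammed with the updated
 * flow-control flags.
 */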
12100 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12101 {
12102         struct tg3 *tp = netdev_priv(dev);
12103         int err = 0;
12104
12105         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12106                 tg3_warn_mgmt_link_flap(tp);
12107
12108         if (tg3_flag(tp, USE_PHYLIB)) {
12109                 u32 newadv;
12110                 struct phy_device *phydev;
12111
12112                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12113
12114                 if (!(phydev->supported & SUPPORTED_Pause) ||
12115                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12116                      (epause->rx_pause != epause->tx_pause)))
12117                         return -EINVAL;
12118
12119                 tp->link_config.flowctrl = 0;
12120                 if (epause->rx_pause) {
12121                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12122
12123                         if (epause->tx_pause) {
12124                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12125                                 newadv = ADVERTISED_Pause;
12126                         } else
12127                                 newadv = ADVERTISED_Pause |
12128                                          ADVERTISED_Asym_Pause;
12129                 } else if (epause->tx_pause) {
12130                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12131                         newadv = ADVERTISED_Asym_Pause;
12132                 } else
12133                         newadv = 0;
12134
12135                 if (epause->autoneg)
12136                         tg3_flag_set(tp, PAUSE_AUTONEG);
12137                 else
12138                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12139
12140                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12141                         u32 oldadv = phydev->advertising &
12142                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12143                         if (oldadv != newadv) {
12144                                 phydev->advertising &=
12145                                         ~(ADVERTISED_Pause |
12146                                           ADVERTISED_Asym_Pause);
12147                                 phydev->advertising |= newadv;
12148                                 if (phydev->autoneg) {
12149                                         /*
12150                                          * Always renegotiate the link to
12151                                          * inform our link partner of our
12152                                          * flow control settings, even if the
12153                                          * flow control is forced.  Let
12154                                          * tg3_adjust_link() do the final
12155                                          * flow control setup.
12156                                          */
12157                                         return phy_start_aneg(phydev);
12158                                 }
12159                         }
12160
12161                         if (!epause->autoneg)
12162                                 tg3_setup_flow_control(tp, 0, 0);
12163                 } else {
12164                         tp->link_config.advertising &=
12165                                         ~(ADVERTISED_Pause |
12166                                           ADVERTISED_Asym_Pause);
12167                         tp->link_config.advertising |= newadv;
12168                 }
12169         } else {
12170                 int irq_sync = 0;
12171
12172                 if (netif_running(dev)) {
12173                         tg3_netif_stop(tp);
12174                         irq_sync = 1;
12175                 }
12176
12177                 tg3_full_lock(tp, irq_sync);
12178
12179                 if (epause->autoneg)
12180                         tg3_flag_set(tp, PAUSE_AUTONEG);
12181                 else
12182                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12183                 if (epause->rx_pause)
12184                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12185                 else
12186                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12187                 if (epause->tx_pause)
12188                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12189                 else
12190                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12191
12192                 if (netif_running(dev)) {
12193                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12194                         err = tg3_restart_hw(tp, false);
12195                         if (!err)
12196                                 tg3_netif_start(tp);
12197                 }
12198
12199                 tg3_full_unlock(tp);
12200         }
12201
12202         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12203
12204         return err;
12205 }
12206
12207 static int tg3_get_sset_count(struct net_device *dev, int sset)
12208 {
12209         switch (sset) {
12210         case ETH_SS_TEST:
12211                 return TG3_NUM_TEST;
12212         case ETH_SS_STATS:
12213                 return TG3_NUM_STATS;
12214         default:
12215                 return -EOPNOTSUPP;
12216         }
12217 }
12218
12219 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12220                          u32 *rules __always_unused)
12221 {
12222         struct tg3 *tp = netdev_priv(dev);
12223
12224         if (!tg3_flag(tp, SUPPORT_MSIX))
12225                 return -EOPNOTSUPP;
12226
12227         switch (info->cmd) {
12228         case ETHTOOL_GRXRINGS:
12229                 if (netif_running(tp->dev))
12230                         info->data = tp->rxq_cnt;
12231                 else {
12232                         info->data = num_online_cpus();
12233                         if (info->data > TG3_RSS_MAX_NUM_QS)
12234                                 info->data = TG3_RSS_MAX_NUM_QS;
12235                 }
12236
12237                 /* The first interrupt vector only
12238                  * handles link interrupts.
12239                  */
12240                 info->data -= 1;
12241                 return 0;
12242
12243         default:
12244                 return -EOPNOTSUPP;
12245         }
12246 }
12247
12248 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12249 {
12250         u32 size = 0;
12251         struct tg3 *tp = netdev_priv(dev);
12252
12253         if (tg3_flag(tp, SUPPORT_MSIX))
12254                 size = TG3_RSS_INDIR_TBL_SIZE;
12255
12256         return size;
12257 }
12258
12259 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12260 {
12261         struct tg3 *tp = netdev_priv(dev);
12262         int i;
12263
12264         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12265                 indir[i] = tp->rss_ind_tbl[i];
12266
12267         return 0;
12268 }
12269
12270 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12271 {
12272         struct tg3 *tp = netdev_priv(dev);
12273         size_t i;
12274
12275         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12276                 tp->rss_ind_tbl[i] = indir[i];
12277
12278         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12279                 return 0;
12280
12281         /* It is legal to write the indirection
12282          * table while the device is running.
12283          */
12284         tg3_full_lock(tp, 0);
12285         tg3_rss_write_indir_tbl(tp);
12286         tg3_full_unlock(tp);
12287
12288         return 0;
12289 }
12290
12291 static void tg3_get_channels(struct net_device *dev,
12292                              struct ethtool_channels *channel)
12293 {
12294         struct tg3 *tp = netdev_priv(dev);
12295         u32 deflt_qs = netif_get_num_default_rss_queues();
12296
12297         channel->max_rx = tp->rxq_max;
12298         channel->max_tx = tp->txq_max;
12299
12300         if (netif_running(dev)) {
12301                 channel->rx_count = tp->rxq_cnt;
12302                 channel->tx_count = tp->txq_cnt;
12303         } else {
12304                 if (tp->rxq_req)
12305                         channel->rx_count = tp->rxq_req;
12306                 else
12307                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12308
12309                 if (tp->txq_req)
12310                         channel->tx_count = tp->txq_req;
12311                 else
12312                         channel->tx_count = min(deflt_qs, tp->txq_max);
12313         }
12314 }
12315
12316 static int tg3_set_channels(struct net_device *dev,
12317                             struct ethtool_channels *channel)
12318 {
12319         struct tg3 *tp = netdev_priv(dev);
12320
12321         if (!tg3_flag(tp, SUPPORT_MSIX))
12322                 return -EOPNOTSUPP;
12323
12324         if (channel->rx_count > tp->rxq_max ||
12325             channel->tx_count > tp->txq_max)
12326                 return -EINVAL;
12327
12328         tp->rxq_req = channel->rx_count;
12329         tp->txq_req = channel->tx_count;
12330
12331         if (!netif_running(dev))
12332                 return 0;
12333
12334         tg3_stop(tp);
12335
12336         tg3_carrier_off(tp);
12337
12338         tg3_start(tp, true, false, false);
12339
12340         return 0;
12341 }
12342
12343 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12344 {
12345         switch (stringset) {
12346         case ETH_SS_STATS:
12347                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12348                 break;
12349         case ETH_SS_TEST:
12350                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12351                 break;
12352         default:
12353                 WARN_ON(1);     /* unknown string set; a descriptive WARN() would be better */
12354                 break;
12355         }
12356 }
12357
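/* ethtool -p: blink the port LED by overriding MAC_LED_CTRL, then
 * restore the saved tp->led_ctrl value once identification stops.
 */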
12358 static int tg3_set_phys_id(struct net_device *dev,
12359                             enum ethtool_phys_id_state state)
12360 {
12361         struct tg3 *tp = netdev_priv(dev);
12362
12363         if (!netif_running(tp->dev))
12364                 return -EAGAIN;
12365
12366         switch (state) {
12367         case ETHTOOL_ID_ACTIVE:
12368                 return 1;       /* cycle on/off once per second */
12369
12370         case ETHTOOL_ID_ON:
12371                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12372                      LED_CTRL_1000MBPS_ON |
12373                      LED_CTRL_100MBPS_ON |
12374                      LED_CTRL_10MBPS_ON |
12375                      LED_CTRL_TRAFFIC_OVERRIDE |
12376                      LED_CTRL_TRAFFIC_BLINK |
12377                      LED_CTRL_TRAFFIC_LED);
12378                 break;
12379
12380         case ETHTOOL_ID_OFF:
12381                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12382                      LED_CTRL_TRAFFIC_OVERRIDE);
12383                 break;
12384
12385         case ETHTOOL_ID_INACTIVE:
12386                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12387                 break;
12388         }
12389
12390         return 0;
12391 }
12392
12393 static void tg3_get_ethtool_stats(struct net_device *dev,
12394                                    struct ethtool_stats *estats, u64 *tmp_stats)
12395 {
12396         struct tg3 *tp = netdev_priv(dev);
12397
12398         if (tp->hw_stats)
12399                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12400         else
12401                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12402 }
12403
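/* Return a kmalloc'd copy of the VPD block.  On EEPROM-style parts
 * the NVRAM directory may point at an extended VPD region
 * (TG3_NVM_DIRTYPE_EXTVPD); failing that, the fixed TG3_NVM_VPD_OFF
 * window is used.  If the NVRAM magic doesn't match at all, the data
 * is pulled through PCI config space with pci_read_vpd() instead.
 */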
12404 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12405 {
12406         int i;
12407         __be32 *buf;
12408         u32 offset = 0, len = 0;
12409         u32 magic, val;
12410
12411         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12412                 return NULL;
12413
12414         if (magic == TG3_EEPROM_MAGIC) {
12415                 for (offset = TG3_NVM_DIR_START;
12416                      offset < TG3_NVM_DIR_END;
12417                      offset += TG3_NVM_DIRENT_SIZE) {
12418                         if (tg3_nvram_read(tp, offset, &val))
12419                                 return NULL;
12420
12421                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12422                             TG3_NVM_DIRTYPE_EXTVPD)
12423                                 break;
12424                 }
12425
12426                 if (offset != TG3_NVM_DIR_END) {
12427                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12428                         if (tg3_nvram_read(tp, offset + 4, &offset))
12429                                 return NULL;
12430
12431                         offset = tg3_nvram_logical_addr(tp, offset);
12432                 }
12433         }
12434
12435         if (!offset || !len) {
12436                 offset = TG3_NVM_VPD_OFF;
12437                 len = TG3_NVM_VPD_LEN;
12438         }
12439
12440         buf = kmalloc(len, GFP_KERNEL);
12441         if (buf == NULL)
12442                 return NULL;
12443
12444         if (magic == TG3_EEPROM_MAGIC) {
12445                 for (i = 0; i < len; i += 4) {
12446                         /* The data is in little-endian format in NVRAM.
12447                          * Use the big-endian read routines to preserve
12448                          * the byte order as it exists in NVRAM.
12449                          */
12450                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12451                                 goto error;
12452                 }
12453         } else {
12454                 u8 *ptr;
12455                 ssize_t cnt;
12456                 unsigned int pos = 0;
12457
12458                 ptr = (u8 *)&buf[0];
12459                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12460                         cnt = pci_read_vpd(tp->pdev, pos,
12461                                            len - pos, ptr);
12462                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12463                                 cnt = 0;
12464                         else if (cnt < 0)
12465                                 goto error;
12466                 }
12467                 if (pos != len)
12468                         goto error;
12469         }
12470
12471         *vpdlen = len;
12472
12473         return buf;
12474
12475 error:
12476         kfree(buf);
12477         return NULL;
12478 }
12479
12480 #define NVRAM_TEST_SIZE 0x100
12481 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12482 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12483 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12484 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12485 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12486 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12487 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12488 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12489
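/* NVRAM self-test.  Legacy images carry CRCs at fixed offsets (0x10
 * and 0xfc) plus an optional VPD checksum keyword; selfboot format-1
 * images use a simple byte checksum (skipping the MBA word on rev 2);
 * selfboot HW images store per-byte parity bits that are verified
 * against hweight8().
 */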
12490 static int tg3_test_nvram(struct tg3 *tp)
12491 {
12492         u32 csum, magic, len;
12493         __be32 *buf;
12494         int i, j, k, err = 0, size;
12495
12496         if (tg3_flag(tp, NO_NVRAM))
12497                 return 0;
12498
12499         if (tg3_nvram_read(tp, 0, &magic) != 0)
12500                 return -EIO;
12501
12502         if (magic == TG3_EEPROM_MAGIC)
12503                 size = NVRAM_TEST_SIZE;
12504         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12505                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12506                     TG3_EEPROM_SB_FORMAT_1) {
12507                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12508                         case TG3_EEPROM_SB_REVISION_0:
12509                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12510                                 break;
12511                         case TG3_EEPROM_SB_REVISION_2:
12512                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12513                                 break;
12514                         case TG3_EEPROM_SB_REVISION_3:
12515                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12516                                 break;
12517                         case TG3_EEPROM_SB_REVISION_4:
12518                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12519                                 break;
12520                         case TG3_EEPROM_SB_REVISION_5:
12521                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12522                                 break;
12523                         case TG3_EEPROM_SB_REVISION_6:
12524                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12525                                 break;
12526                         default:
12527                                 return -EIO;
12528                         }
12529                 } else
12530                         return 0;
12531         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12532                 size = NVRAM_SELFBOOT_HW_SIZE;
12533         else
12534                 return -EIO;
12535
12536         buf = kmalloc(size, GFP_KERNEL);
12537         if (buf == NULL)
12538                 return -ENOMEM;
12539
12540         err = -EIO;
12541         for (i = 0, j = 0; i < size; i += 4, j++) {
12542                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12543                 if (err)
12544                         break;
12545         }
12546         if (i < size)
12547                 goto out;
12548
12549         /* Selfboot format */
12550         magic = be32_to_cpu(buf[0]);
12551         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12552             TG3_EEPROM_MAGIC_FW) {
12553                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12554
12555                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12556                     TG3_EEPROM_SB_REVISION_2) {
12557                         /* For rev 2, the csum doesn't include the MBA. */
12558                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12559                                 csum8 += buf8[i];
12560                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12561                                 csum8 += buf8[i];
12562                 } else {
12563                         for (i = 0; i < size; i++)
12564                                 csum8 += buf8[i];
12565                 }
12566
12567                 if (csum8 == 0) {
12568                         err = 0;
12569                         goto out;
12570                 }
12571
12572                 err = -EIO;
12573                 goto out;
12574         }
12575
12576         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12577             TG3_EEPROM_MAGIC_HW) {
12578                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12579                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12580                 u8 *buf8 = (u8 *) buf;
12581
12582                 /* Separate the parity bits and the data bytes.  */
12583                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12584                         if ((i == 0) || (i == 8)) {
12585                                 int l;
12586                                 u8 msk;
12587
12588                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12589                                         parity[k++] = buf8[i] & msk;
12590                                 i++;
12591                         } else if (i == 16) {
12592                                 int l;
12593                                 u8 msk;
12594
12595                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12596                                         parity[k++] = buf8[i] & msk;
12597                                 i++;
12598
12599                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12600                                         parity[k++] = buf8[i] & msk;
12601                                 i++;
12602                         }
12603                         data[j++] = buf8[i];
12604                 }
12605
12606                 err = -EIO;
12607                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12608                         u8 hw8 = hweight8(data[i]);
12609
12610                         if ((hw8 & 0x1) && parity[i])
12611                                 goto out;
12612                         else if (!(hw8 & 0x1) && !parity[i])
12613                                 goto out;
12614                 }
12615                 err = 0;
12616                 goto out;
12617         }
12618
12619         err = -EIO;
12620
12621         /* Bootstrap checksum at offset 0x10 */
12622         csum = calc_crc((unsigned char *) buf, 0x10);
12623         if (csum != le32_to_cpu(buf[0x10/4]))
12624                 goto out;
12625
12626         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12627         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12628         if (csum != le32_to_cpu(buf[0xfc/4]))
12629                 goto out;
12630
12631         kfree(buf);
12632
12633         buf = tg3_vpd_readblock(tp, &len);
12634         if (!buf)
12635                 return -ENOMEM;
12636
12637         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12638         if (i > 0) {
12639                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12640                 if (j < 0)
12641                         goto out;
12642
12643                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12644                         goto out;
12645
12646                 i += PCI_VPD_LRDT_TAG_SIZE;
12647                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12648                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12649                 if (j > 0) {
12650                         u8 csum8 = 0;
12651
12652                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12653
12654                         for (i = 0; i <= j; i++)
12655                                 csum8 += ((u8 *)buf)[i];
12656
12657                         if (csum8)
12658                                 goto out;
12659                 }
12660         }
12661
12662         err = 0;
12663
12664 out:
12665         kfree(buf);
12666         return err;
12667 }
12668
12669 #define TG3_SERDES_TIMEOUT_SEC  2
12670 #define TG3_COPPER_TIMEOUT_SEC  6
12671
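/* Link self-test: poll tp->link_up once a second, giving serdes links
 * two seconds and copper links six seconds to come up.
 */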
12672 static int tg3_test_link(struct tg3 *tp)
12673 {
12674         int i, max;
12675
12676         if (!netif_running(tp->dev))
12677                 return -ENODEV;
12678
12679         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12680                 max = TG3_SERDES_TIMEOUT_SEC;
12681         else
12682                 max = TG3_COPPER_TIMEOUT_SEC;
12683
12684         for (i = 0; i < max; i++) {
12685                 if (tp->link_up)
12686                         return 0;
12687
12688                 if (msleep_interruptible(1000))
12689                         break;
12690         }
12691
12692         return -EIO;
12693 }
12694
12695 /* Only test the commonly used registers */
12696 static int tg3_test_registers(struct tg3 *tp)
12697 {
12698         int i, is_5705, is_5750;
12699         u32 offset, read_mask, write_mask, val, save_val, read_val;
12700         static struct {
12701                 u16 offset;
12702                 u16 flags;
12703 #define TG3_FL_5705     0x1
12704 #define TG3_FL_NOT_5705 0x2
12705 #define TG3_FL_NOT_5788 0x4
12706 #define TG3_FL_NOT_5750 0x8
12707                 u32 read_mask;
12708                 u32 write_mask;
12709         } reg_tbl[] = {
12710                 /* MAC Control Registers */
12711                 { MAC_MODE, TG3_FL_NOT_5705,
12712                         0x00000000, 0x00ef6f8c },
12713                 { MAC_MODE, TG3_FL_5705,
12714                         0x00000000, 0x01ef6b8c },
12715                 { MAC_STATUS, TG3_FL_NOT_5705,
12716                         0x03800107, 0x00000000 },
12717                 { MAC_STATUS, TG3_FL_5705,
12718                         0x03800100, 0x00000000 },
12719                 { MAC_ADDR_0_HIGH, 0x0000,
12720                         0x00000000, 0x0000ffff },
12721                 { MAC_ADDR_0_LOW, 0x0000,
12722                         0x00000000, 0xffffffff },
12723                 { MAC_RX_MTU_SIZE, 0x0000,
12724                         0x00000000, 0x0000ffff },
12725                 { MAC_TX_MODE, 0x0000,
12726                         0x00000000, 0x00000070 },
12727                 { MAC_TX_LENGTHS, 0x0000,
12728                         0x00000000, 0x00003fff },
12729                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12730                         0x00000000, 0x000007fc },
12731                 { MAC_RX_MODE, TG3_FL_5705,
12732                         0x00000000, 0x000007dc },
12733                 { MAC_HASH_REG_0, 0x0000,
12734                         0x00000000, 0xffffffff },
12735                 { MAC_HASH_REG_1, 0x0000,
12736                         0x00000000, 0xffffffff },
12737                 { MAC_HASH_REG_2, 0x0000,
12738                         0x00000000, 0xffffffff },
12739                 { MAC_HASH_REG_3, 0x0000,
12740                         0x00000000, 0xffffffff },
12741
12742                 /* Receive Data and Receive BD Initiator Control Registers. */
12743                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12744                         0x00000000, 0xffffffff },
12745                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12746                         0x00000000, 0xffffffff },
12747                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12748                         0x00000000, 0x00000003 },
12749                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12750                         0x00000000, 0xffffffff },
12751                 { RCVDBDI_STD_BD+0, 0x0000,
12752                         0x00000000, 0xffffffff },
12753                 { RCVDBDI_STD_BD+4, 0x0000,
12754                         0x00000000, 0xffffffff },
12755                 { RCVDBDI_STD_BD+8, 0x0000,
12756                         0x00000000, 0xffff0002 },
12757                 { RCVDBDI_STD_BD+0xc, 0x0000,
12758                         0x00000000, 0xffffffff },
12759
12760                 /* Receive BD Initiator Control Registers. */
12761                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12762                         0x00000000, 0xffffffff },
12763                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12764                         0x00000000, 0x000003ff },
12765                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12766                         0x00000000, 0xffffffff },
12767
12768                 /* Host Coalescing Control Registers. */
12769                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12770                         0x00000000, 0x00000004 },
12771                 { HOSTCC_MODE, TG3_FL_5705,
12772                         0x00000000, 0x000000f6 },
12773                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12774                         0x00000000, 0xffffffff },
12775                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12776                         0x00000000, 0x000003ff },
12777                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12778                         0x00000000, 0xffffffff },
12779                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12780                         0x00000000, 0x000003ff },
12781                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12782                         0x00000000, 0xffffffff },
12783                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12784                         0x00000000, 0x000000ff },
12785                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12786                         0x00000000, 0xffffffff },
12787                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12788                         0x00000000, 0x000000ff },
12789                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12790                         0x00000000, 0xffffffff },
12791                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12792                         0x00000000, 0xffffffff },
12793                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12794                         0x00000000, 0xffffffff },
12795                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12796                         0x00000000, 0x000000ff },
12797                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12798                         0x00000000, 0xffffffff },
12799                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12800                         0x00000000, 0x000000ff },
12801                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12802                         0x00000000, 0xffffffff },
12803                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12804                         0x00000000, 0xffffffff },
12805                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12806                         0x00000000, 0xffffffff },
12807                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12808                         0x00000000, 0xffffffff },
12809                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12810                         0x00000000, 0xffffffff },
12811                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12812                         0xffffffff, 0x00000000 },
12813                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12814                         0xffffffff, 0x00000000 },
12815
12816                 /* Buffer Manager Control Registers. */
12817                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12818                         0x00000000, 0x007fff80 },
12819                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12820                         0x00000000, 0x007fffff },
12821                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12822                         0x00000000, 0x0000003f },
12823                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12824                         0x00000000, 0x000001ff },
12825                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12826                         0x00000000, 0x000001ff },
12827                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12828                         0xffffffff, 0x00000000 },
12829                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12830                         0xffffffff, 0x00000000 },
12831
12832                 /* Mailbox Registers */
12833                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12834                         0x00000000, 0x000001ff },
12835                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12836                         0x00000000, 0x000001ff },
12837                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12838                         0x00000000, 0x000007ff },
12839                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12840                         0x00000000, 0x000001ff },
12841
12842                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12843         };
12844
12845         is_5705 = is_5750 = 0;
12846         if (tg3_flag(tp, 5705_PLUS)) {
12847                 is_5705 = 1;
12848                 if (tg3_flag(tp, 5750_PLUS))
12849                         is_5750 = 1;
12850         }
12851
12852         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12853                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12854                         continue;
12855
12856                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12857                         continue;
12858
12859                 if (tg3_flag(tp, IS_5788) &&
12860                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12861                         continue;
12862
12863                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12864                         continue;
12865
12866                 offset = (u32) reg_tbl[i].offset;
12867                 read_mask = reg_tbl[i].read_mask;
12868                 write_mask = reg_tbl[i].write_mask;
12869
12870                 /* Save the original register content */
12871                 save_val = tr32(offset);
12872
12873                 /* Determine the read-only value. */
12874                 read_val = save_val & read_mask;
12875
12876                 /* Write zero to the register, then make sure the read-only bits
12877                  * are not changed and the read/write bits are all zeros.
12878                  */
12879                 tw32(offset, 0);
12880
12881                 val = tr32(offset);
12882
12883                 /* Test the read-only and read/write bits. */
12884                 if (((val & read_mask) != read_val) || (val & write_mask))
12885                         goto out;
12886
12887                 /* Write ones to all the bits defined by RdMask and WrMask, then
12888                  * make sure the read-only bits are not changed and the
12889                  * read/write bits are all ones.
12890                  */
12891                 tw32(offset, read_mask | write_mask);
12892
12893                 val = tr32(offset);
12894
12895                 /* Test the read-only bits. */
12896                 if ((val & read_mask) != read_val)
12897                         goto out;
12898
12899                 /* Test the read/write bits. */
12900                 if ((val & write_mask) != write_mask)
12901                         goto out;
12902
12903                 tw32(offset, save_val);
12904         }
12905
12906         return 0;
12907
12908 out:
12909         if (netif_msg_hw(tp))
12910                 netdev_err(tp->dev,
12911                            "Register test failed at offset %x\n", offset);
12912         tw32(offset, save_val);
12913         return -EIO;
12914 }
12915
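/* Walk a window of internal chip memory with a small set of patterns
 * (all-zeros, all-ones, alternating bits), writing and reading back one
 * 32-bit word at a time through the indirect memory interface.
 */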
12916 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12917 {
12918         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12919         int i;
12920         u32 j;
12921
12922         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12923                 for (j = 0; j < len; j += 4) {
12924                         u32 val;
12925
12926                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12927                         tg3_read_mem(tp, offset + j, &val);
12928                         if (val != test_pattern[i])
12929                                 return -EIO;
12930                 }
12931         }
12932         return 0;
12933 }
12934
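/* The internal memory ranges that are safe to test differ per ASIC
 * generation, so each table below lists { offset, len } pairs for one
 * family and is terminated by an offset of 0xffffffff.
 */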
12935 static int tg3_test_memory(struct tg3 *tp)
12936 {
12937         static struct mem_entry {
12938                 u32 offset;
12939                 u32 len;
12940         } mem_tbl_570x[] = {
12941                 { 0x00000000, 0x00b50},
12942                 { 0x00002000, 0x1c000},
12943                 { 0xffffffff, 0x00000}
12944         }, mem_tbl_5705[] = {
12945                 { 0x00000100, 0x0000c},
12946                 { 0x00000200, 0x00008},
12947                 { 0x00004000, 0x00800},
12948                 { 0x00006000, 0x01000},
12949                 { 0x00008000, 0x02000},
12950                 { 0x00010000, 0x0e000},
12951                 { 0xffffffff, 0x00000}
12952         }, mem_tbl_5755[] = {
12953                 { 0x00000200, 0x00008},
12954                 { 0x00004000, 0x00800},
12955                 { 0x00006000, 0x00800},
12956                 { 0x00008000, 0x02000},
12957                 { 0x00010000, 0x0c000},
12958                 { 0xffffffff, 0x00000}
12959         }, mem_tbl_5906[] = {
12960                 { 0x00000200, 0x00008},
12961                 { 0x00004000, 0x00400},
12962                 { 0x00006000, 0x00400},
12963                 { 0x00008000, 0x01000},
12964                 { 0x00010000, 0x01000},
12965                 { 0xffffffff, 0x00000}
12966         }, mem_tbl_5717[] = {
12967                 { 0x00000200, 0x00008},
12968                 { 0x00010000, 0x0a000},
12969                 { 0x00020000, 0x13c00},
12970                 { 0xffffffff, 0x00000}
12971         }, mem_tbl_57765[] = {
12972                 { 0x00000200, 0x00008},
12973                 { 0x00004000, 0x00800},
12974                 { 0x00006000, 0x09800},
12975                 { 0x00010000, 0x0a000},
12976                 { 0xffffffff, 0x00000}
12977         };
12978         struct mem_entry *mem_tbl;
12979         int err = 0;
12980         int i;
12981
12982         if (tg3_flag(tp, 5717_PLUS))
12983                 mem_tbl = mem_tbl_5717;
12984         else if (tg3_flag(tp, 57765_CLASS) ||
12985                  tg3_asic_rev(tp) == ASIC_REV_5762)
12986                 mem_tbl = mem_tbl_57765;
12987         else if (tg3_flag(tp, 5755_PLUS))
12988                 mem_tbl = mem_tbl_5755;
12989         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12990                 mem_tbl = mem_tbl_5906;
12991         else if (tg3_flag(tp, 5705_PLUS))
12992                 mem_tbl = mem_tbl_5705;
12993         else
12994                 mem_tbl = mem_tbl_570x;
12995
12996         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12997                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12998                 if (err)
12999                         break;
13000         }
13001
13002         return err;
13003 }
13004
13005 #define TG3_TSO_MSS             500
13006
13007 #define TG3_TSO_IP_HDR_LEN      20
13008 #define TG3_TSO_TCP_HDR_LEN     20
13009 #define TG3_TSO_TCP_OPT_LEN     12
13010
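/* Canned header for the TSO loopback test: Ethertype 0x0800, a 20-byte
 * IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP), a 20-byte TCP
 * header and a 12-byte timestamp option, matching the TG3_TSO_*_LEN
 * constants above.  The IP total-length and TCP checksum fields are
 * patched at run time.
 */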
13011 static const u8 tg3_tso_header[] = {
13012 0x08, 0x00,
13013 0x45, 0x00, 0x00, 0x00,
13014 0x00, 0x00, 0x40, 0x00,
13015 0x40, 0x06, 0x00, 0x00,
13016 0x0a, 0x00, 0x00, 0x01,
13017 0x0a, 0x00, 0x00, 0x02,
13018 0x0d, 0x00, 0xe0, 0x00,
13019 0x00, 0x00, 0x01, 0x00,
13020 0x00, 0x00, 0x02, 0x00,
13021 0x80, 0x10, 0x10, 0x00,
13022 0x14, 0x09, 0x00, 0x00,
13023 0x01, 0x01, 0x08, 0x0a,
13024 0x11, 0x11, 0x11, 0x11,
13025 0x11, 0x11, 0x11, 0x11,
13026 };
13027
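/* Transmit one test frame (or, with tso_loopback, one TSO super-frame
 * that the hardware segments into TG3_TSO_MSS-sized packets) and verify
 * that every packet comes back on the expected rx ring carrying the
 * expected payload pattern.  Returns 0 on success and a negative errno
 * on allocation, mapping, or verification failure.
 */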
13028 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13029 {
13030         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13031         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13032         u32 budget;
13033         struct sk_buff *skb;
13034         u8 *tx_data, *rx_data;
13035         dma_addr_t map;
13036         int num_pkts, tx_len, rx_len, i, err;
13037         struct tg3_rx_buffer_desc *desc;
13038         struct tg3_napi *tnapi, *rnapi;
13039         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13040
13041         tnapi = &tp->napi[0];
13042         rnapi = &tp->napi[0];
13043         if (tp->irq_cnt > 1) {
13044                 if (tg3_flag(tp, ENABLE_RSS))
13045                         rnapi = &tp->napi[1];
13046                 if (tg3_flag(tp, ENABLE_TSS))
13047                         tnapi = &tp->napi[1];
13048         }
13049         coal_now = tnapi->coal_now | rnapi->coal_now;
13050
13051         err = -EIO;
13052
13053         tx_len = pktsz;
13054         skb = netdev_alloc_skb(tp->dev, tx_len);
13055         if (!skb)
13056                 return -ENOMEM;
13057
13058         tx_data = skb_put(skb, tx_len);
13059         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13060         memset(tx_data + ETH_ALEN, 0x0, 8);
13061
13062         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13063
13064         if (tso_loopback) {
13065                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13066
13067                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13068                               TG3_TSO_TCP_OPT_LEN;
13069
13070                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13071                        sizeof(tg3_tso_header));
13072                 mss = TG3_TSO_MSS;
13073
13074                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13075                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13076
13077                 /* Set the total length field in the IP header */
13078                 iph->tot_len = htons((u16)(mss + hdr_len));
13079
13080                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13081                               TXD_FLAG_CPU_POST_DMA);
13082
13083                 if (tg3_flag(tp, HW_TSO_1) ||
13084                     tg3_flag(tp, HW_TSO_2) ||
13085                     tg3_flag(tp, HW_TSO_3)) {
13086                         struct tcphdr *th;
13087                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13088                         th = (struct tcphdr *)&tx_data[val];
13089                         th->check = 0;
13090                 } else
13091                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13092
13093                 if (tg3_flag(tp, HW_TSO_3)) {
13094                         mss |= (hdr_len & 0xc) << 12;
13095                         if (hdr_len & 0x10)
13096                                 base_flags |= 0x00000010;
13097                         base_flags |= (hdr_len & 0x3e0) << 5;
13098                 } else if (tg3_flag(tp, HW_TSO_2))
13099                         mss |= hdr_len << 9;
13100                 else if (tg3_flag(tp, HW_TSO_1) ||
13101                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13102                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13103                 } else {
13104                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13105                 }
13106
13107                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13108         } else {
13109                 num_pkts = 1;
13110                 data_off = ETH_HLEN;
13111
13112                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13113                     tx_len > VLAN_ETH_FRAME_LEN)
13114                         base_flags |= TXD_FLAG_JMB_PKT;
13115         }
13116
13117         for (i = data_off; i < tx_len; i++)
13118                 tx_data[i] = (u8) (i & 0xff);
13119
13120         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13121         if (pci_dma_mapping_error(tp->pdev, map)) {
13122                 dev_kfree_skb(skb);
13123                 return -EIO;
13124         }
13125
13126         val = tnapi->tx_prod;
13127         tnapi->tx_buffers[val].skb = skb;
13128         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13129
13130         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13131                rnapi->coal_now);
13132
13133         udelay(10);
13134
13135         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13136
13137         budget = tg3_tx_avail(tnapi);
13138         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13139                             base_flags | TXD_FLAG_END, mss, 0)) {
13140                 tnapi->tx_buffers[val].skb = NULL;
13141                 dev_kfree_skb(skb);
13142                 return -EIO;
13143         }
13144
13145         tnapi->tx_prod++;
13146
13147         /* Sync BD data before updating mailbox */
13148         wmb();
13149
13150         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13151         tr32_mailbox(tnapi->prodmbox);
13152
13153         udelay(10);
13154
13155         /* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
13156         for (i = 0; i < 35; i++) {
13157                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13158                        coal_now);
13159
13160                 udelay(10);
13161
13162                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13163                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13164                 if ((tx_idx == tnapi->tx_prod) &&
13165                     (rx_idx == (rx_start_idx + num_pkts)))
13166                         break;
13167         }
13168
13169         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13170         dev_kfree_skb(skb);
13171
13172         if (tx_idx != tnapi->tx_prod)
13173                 goto out;
13174
13175         if (rx_idx != rx_start_idx + num_pkts)
13176                 goto out;
13177
13178         val = data_off;
13179         while (rx_idx != rx_start_idx) {
13180                 desc = &rnapi->rx_rcb[rx_start_idx++];
13181                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13182                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13183
13184                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13185                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13186                         goto out;
13187
13188                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13189                          - ETH_FCS_LEN;
13190
13191                 if (!tso_loopback) {
13192                         if (rx_len != tx_len)
13193                                 goto out;
13194
13195                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13196                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13197                                         goto out;
13198                         } else {
13199                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13200                                         goto out;
13201                         }
13202                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13203                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13204                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13205                         goto out;
13206                 }
13207
13208                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13209                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13210                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13211                                              mapping);
13212                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13213                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13214                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13215                                              mapping);
13216                 } else
13217                         goto out;
13218
13219                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13220                                             PCI_DMA_FROMDEVICE);
13221
13222                 rx_data += TG3_RX_OFFSET(tp);
13223                 for (i = data_off; i < rx_len; i++, val++) {
13224                         if (*(rx_data + i) != (u8) (val & 0xff))
13225                                 goto out;
13226                 }
13227         }
13228
13229         err = 0;
13230
13231         /* tg3_free_rings will unmap and free the rx_data */
13232 out:
13233         return err;
13234 }
13235
13236 #define TG3_STD_LOOPBACK_FAILED         1
13237 #define TG3_JMB_LOOPBACK_FAILED         2
13238 #define TG3_TSO_LOOPBACK_FAILED         4
13239 #define TG3_LOOPBACK_FAILED \
13240         (TG3_STD_LOOPBACK_FAILED | \
13241          TG3_JMB_LOOPBACK_FAILED | \
13242          TG3_TSO_LOOPBACK_FAILED)
13243
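/* Run the loopback suite in up to three modes: internal MAC loopback
 * (skipped on 5780 per the errata note below and on CPMU-present
 * chips), internal PHY loopback, and, when requested, external loopback
 * through an attached cable.  Per-mode results are accumulated into
 * data[] as TG3_*_LOOPBACK_FAILED bits; with RSS enabled, the
 * indirection table is first cleared so all test packets land on the
 * first rx queue.
 */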
13244 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13245 {
13246         int err = -EIO;
13247         u32 eee_cap;
13248         u32 jmb_pkt_sz = 9000;
13249
13250         if (tp->dma_limit)
13251                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13252
13253         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13254         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13255
13256         if (!netif_running(tp->dev)) {
13257                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13258                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13259                 if (do_extlpbk)
13260                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13261                 goto done;
13262         }
13263
13264         err = tg3_reset_hw(tp, true);
13265         if (err) {
13266                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13267                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13268                 if (do_extlpbk)
13269                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13270                 goto done;
13271         }
13272
13273         if (tg3_flag(tp, ENABLE_RSS)) {
13274                 int i;
13275
13276                 /* Reroute all rx packets to the 1st queue */
13277                 for (i = MAC_RSS_INDIR_TBL_0;
13278                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13279                         tw32(i, 0x0);
13280         }
13281
13282         /* HW errata - mac loopback fails in some cases on 5780.
13283          * Normal traffic and PHY loopback are not affected by
13284          * errata.  Also, the MAC loopback test is deprecated for
13285          * all newer ASIC revisions.
13286          */
13287         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13288             !tg3_flag(tp, CPMU_PRESENT)) {
13289                 tg3_mac_loopback(tp, true);
13290
13291                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13292                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13293
13294                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13295                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13296                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13297
13298                 tg3_mac_loopback(tp, false);
13299         }
13300
13301         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13302             !tg3_flag(tp, USE_PHYLIB)) {
13303                 int i;
13304
13305                 tg3_phy_lpbk_set(tp, 0, false);
13306
13307                 /* Wait for link */
13308                 for (i = 0; i < 100; i++) {
13309                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13310                                 break;
13311                         mdelay(1);
13312                 }
13313
13314                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13315                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13316                 if (tg3_flag(tp, TSO_CAPABLE) &&
13317                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13318                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13319                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13320                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13321                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13322
13323                 if (do_extlpbk) {
13324                         tg3_phy_lpbk_set(tp, 0, true);
13325
13326                         /* All link indications report up, but the hardware
13327                          * isn't really ready for about 20 msec.  Double it
13328                          * to be sure.
13329                          */
13330                         mdelay(40);
13331
13332                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13333                                 data[TG3_EXT_LOOPB_TEST] |=
13334                                                         TG3_STD_LOOPBACK_FAILED;
13335                         if (tg3_flag(tp, TSO_CAPABLE) &&
13336                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13337                                 data[TG3_EXT_LOOPB_TEST] |=
13338                                                         TG3_TSO_LOOPBACK_FAILED;
13339                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13340                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13341                                 data[TG3_EXT_LOOPB_TEST] |=
13342                                                         TG3_JMB_LOOPBACK_FAILED;
13343                 }
13344
13345                 /* Re-enable gphy autopowerdown. */
13346                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13347                         tg3_phy_toggle_apd(tp, true);
13348         }
13349
13350         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13351                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13352
13353 done:
13354         tp->phy_flags |= eee_cap;
13355
13356         return err;
13357 }
13358
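/* ethtool self-test entry point.  The offline tests (registers, memory,
 * loopback, interrupt) halt the device and restart it afterwards, while
 * the online tests (NVRAM, link) leave traffic undisturbed.  From user
 * space this is typically driven by, for example,
 * "ethtool -t eth0 offline" (interface name chosen for illustration).
 */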
13359 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13360                           u64 *data)
13361 {
13362         struct tg3 *tp = netdev_priv(dev);
13363         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13364
13365         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13366             tg3_power_up(tp)) {
13367                 etest->flags |= ETH_TEST_FL_FAILED;
13368                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13369                 return;
13370         }
13371
13372         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13373
13374         if (tg3_test_nvram(tp) != 0) {
13375                 etest->flags |= ETH_TEST_FL_FAILED;
13376                 data[TG3_NVRAM_TEST] = 1;
13377         }
13378         if (!doextlpbk && tg3_test_link(tp)) {
13379                 etest->flags |= ETH_TEST_FL_FAILED;
13380                 data[TG3_LINK_TEST] = 1;
13381         }
13382         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13383                 int err, err2 = 0, irq_sync = 0;
13384
13385                 if (netif_running(dev)) {
13386                         tg3_phy_stop(tp);
13387                         tg3_netif_stop(tp);
13388                         irq_sync = 1;
13389                 }
13390
13391                 tg3_full_lock(tp, irq_sync);
13392                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13393                 err = tg3_nvram_lock(tp);
13394                 tg3_halt_cpu(tp, RX_CPU_BASE);
13395                 if (!tg3_flag(tp, 5705_PLUS))
13396                         tg3_halt_cpu(tp, TX_CPU_BASE);
13397                 if (!err)
13398                         tg3_nvram_unlock(tp);
13399
13400                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13401                         tg3_phy_reset(tp);
13402
13403                 if (tg3_test_registers(tp) != 0) {
13404                         etest->flags |= ETH_TEST_FL_FAILED;
13405                         data[TG3_REGISTER_TEST] = 1;
13406                 }
13407
13408                 if (tg3_test_memory(tp) != 0) {
13409                         etest->flags |= ETH_TEST_FL_FAILED;
13410                         data[TG3_MEMORY_TEST] = 1;
13411                 }
13412
13413                 if (doextlpbk)
13414                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13415
13416                 if (tg3_test_loopback(tp, data, doextlpbk))
13417                         etest->flags |= ETH_TEST_FL_FAILED;
13418
13419                 tg3_full_unlock(tp);
13420
13421                 if (tg3_test_interrupt(tp) != 0) {
13422                         etest->flags |= ETH_TEST_FL_FAILED;
13423                         data[TG3_INTERRUPT_TEST] = 1;
13424                 }
13425
13426                 tg3_full_lock(tp, 0);
13427
13428                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13429                 if (netif_running(dev)) {
13430                         tg3_flag_set(tp, INIT_COMPLETE);
13431                         err2 = tg3_restart_hw(tp, true);
13432                         if (!err2)
13433                                 tg3_netif_start(tp);
13434                 }
13435
13436                 tg3_full_unlock(tp);
13437
13438                 if (irq_sync && !err2)
13439                         tg3_phy_start(tp);
13440         }
13441         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13442                 tg3_power_down(tp);
13443
13444 }
13445
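/* SIOCSHWTSTAMP handler.  A minimal user-space sketch (hypothetical
 * interface name, error handling omitted; fd is an open socket):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The switch below maps each rx_filter onto the corresponding
 * TG3_RX_PTP_CTL_* bits and writes them to the RX PTP control register.
 */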
13446 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13447                               struct ifreq *ifr, int cmd)
13448 {
13449         struct tg3 *tp = netdev_priv(dev);
13450         struct hwtstamp_config stmpconf;
13451
13452         if (!tg3_flag(tp, PTP_CAPABLE))
13453                 return -EINVAL;
13454
13455         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13456                 return -EFAULT;
13457
13458         if (stmpconf.flags)
13459                 return -EINVAL;
13460
13461         switch (stmpconf.tx_type) {
13462         case HWTSTAMP_TX_ON:
13463                 tg3_flag_set(tp, TX_TSTAMP_EN);
13464                 break;
13465         case HWTSTAMP_TX_OFF:
13466                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13467                 break;
13468         default:
13469                 return -ERANGE;
13470         }
13471
13472         switch (stmpconf.rx_filter) {
13473         case HWTSTAMP_FILTER_NONE:
13474                 tp->rxptpctl = 0;
13475                 break;
13476         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13477                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13478                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13479                 break;
13480         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13481                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13482                                TG3_RX_PTP_CTL_SYNC_EVNT;
13483                 break;
13484         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13485                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13486                                TG3_RX_PTP_CTL_DELAY_REQ;
13487                 break;
13488         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13489                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13490                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13491                 break;
13492         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13493                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13494                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13495                 break;
13496         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13497                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13498                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13499                 break;
13500         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13501                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13502                                TG3_RX_PTP_CTL_SYNC_EVNT;
13503                 break;
13504         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13505                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13506                                TG3_RX_PTP_CTL_SYNC_EVNT;
13507                 break;
13508         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13509                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13510                                TG3_RX_PTP_CTL_SYNC_EVNT;
13511                 break;
13512         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13513                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13514                                TG3_RX_PTP_CTL_DELAY_REQ;
13515                 break;
13516         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13517                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13518                                TG3_RX_PTP_CTL_DELAY_REQ;
13519                 break;
13520         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13521                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13522                                TG3_RX_PTP_CTL_DELAY_REQ;
13523                 break;
13524         default:
13525                 return -ERANGE;
13526         }
13527
13528         if (netif_running(dev) && tp->rxptpctl)
13529                 tw32(TG3_RX_PTP_CTL,
13530                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13531
13532         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13533                 -EFAULT : 0;
13534 }
13535
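/* MII and timestamping ioctl entry point.  When phylib manages the PHY,
 * everything is forwarded to phy_mii_ioctl(); otherwise SIOCGMIIREG and
 * SIOCSMIIREG are serviced directly under tp->lock (SIOCGMIIPHY falls
 * through into SIOCGMIIREG), and SIOCSHWTSTAMP is routed to the handler
 * above.
 */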
13536 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13537 {
13538         struct mii_ioctl_data *data = if_mii(ifr);
13539         struct tg3 *tp = netdev_priv(dev);
13540         int err;
13541
13542         if (tg3_flag(tp, USE_PHYLIB)) {
13543                 struct phy_device *phydev;
13544                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13545                         return -EAGAIN;
13546                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13547                 return phy_mii_ioctl(phydev, ifr, cmd);
13548         }
13549
13550         switch (cmd) {
13551         case SIOCGMIIPHY:
13552                 data->phy_id = tp->phy_addr;
13553
13554                 /* fall through */
13555         case SIOCGMIIREG: {
13556                 u32 mii_regval;
13557
13558                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13559                         break;                  /* We have no PHY */
13560
13561                 if (!netif_running(dev))
13562                         return -EAGAIN;
13563
13564                 spin_lock_bh(&tp->lock);
13565                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13566                                     data->reg_num & 0x1f, &mii_regval);
13567                 spin_unlock_bh(&tp->lock);
13568
13569                 data->val_out = mii_regval;
13570
13571                 return err;
13572         }
13573
13574         case SIOCSMIIREG:
13575                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13576                         break;                  /* We have no PHY */
13577
13578                 if (!netif_running(dev))
13579                         return -EAGAIN;
13580
13581                 spin_lock_bh(&tp->lock);
13582                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13583                                      data->reg_num & 0x1f, data->val_in);
13584                 spin_unlock_bh(&tp->lock);
13585
13586                 return err;
13587
13588         case SIOCSHWTSTAMP:
13589                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13590
13591         default:
13592                 /* do nothing */
13593                 break;
13594         }
13595         return -EOPNOTSUPP;
13596 }
13597
13598 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13599 {
13600         struct tg3 *tp = netdev_priv(dev);
13601
13602         memcpy(ec, &tp->coal, sizeof(*ec));
13603         return 0;
13604 }
13605
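/* ethtool -C handler.  The requested values are bounds-checked against
 * the hardware limits (the per-IRQ and statistics limits are zero on
 * 5705+ parts, which therefore reject nonzero requests for them) and
 * applied immediately if the interface is running.  For example,
 * "ethtool -C eth0 rx-usecs 50 rx-frames 32" (interface name chosen for
 * illustration) arrives here as ec->rx_coalesce_usecs and
 * ec->rx_max_coalesced_frames.
 */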
13606 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13607 {
13608         struct tg3 *tp = netdev_priv(dev);
13609         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13610         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13611
13612         if (!tg3_flag(tp, 5705_PLUS)) {
13613                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13614                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13615                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13616                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13617         }
13618
13619         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13620             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13621             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13622             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13623             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13624             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13625             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13626             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13627             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13628             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13629                 return -EINVAL;
13630
13631         /* No rx interrupts will be generated if both are zero */
13632         if ((ec->rx_coalesce_usecs == 0) &&
13633             (ec->rx_max_coalesced_frames == 0))
13634                 return -EINVAL;
13635
13636         /* No tx interrupts will be generated if both are zero */
13637         if ((ec->tx_coalesce_usecs == 0) &&
13638             (ec->tx_max_coalesced_frames == 0))
13639                 return -EINVAL;
13640
13641         /* Only copy relevant parameters, ignore all others. */
13642         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13643         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13644         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13645         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13646         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13647         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13648         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13649         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13650         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13651
13652         if (netif_running(dev)) {
13653                 tg3_full_lock(tp, 0);
13654                 __tg3_set_coalesce(tp, &tp->coal);
13655                 tg3_full_unlock(tp);
13656         }
13657         return 0;
13658 }
13659
13660 static const struct ethtool_ops tg3_ethtool_ops = {
13661         .get_settings           = tg3_get_settings,
13662         .set_settings           = tg3_set_settings,
13663         .get_drvinfo            = tg3_get_drvinfo,
13664         .get_regs_len           = tg3_get_regs_len,
13665         .get_regs               = tg3_get_regs,
13666         .get_wol                = tg3_get_wol,
13667         .set_wol                = tg3_set_wol,
13668         .get_msglevel           = tg3_get_msglevel,
13669         .set_msglevel           = tg3_set_msglevel,
13670         .nway_reset             = tg3_nway_reset,
13671         .get_link               = ethtool_op_get_link,
13672         .get_eeprom_len         = tg3_get_eeprom_len,
13673         .get_eeprom             = tg3_get_eeprom,
13674         .set_eeprom             = tg3_set_eeprom,
13675         .get_ringparam          = tg3_get_ringparam,
13676         .set_ringparam          = tg3_set_ringparam,
13677         .get_pauseparam         = tg3_get_pauseparam,
13678         .set_pauseparam         = tg3_set_pauseparam,
13679         .self_test              = tg3_self_test,
13680         .get_strings            = tg3_get_strings,
13681         .set_phys_id            = tg3_set_phys_id,
13682         .get_ethtool_stats      = tg3_get_ethtool_stats,
13683         .get_coalesce           = tg3_get_coalesce,
13684         .set_coalesce           = tg3_set_coalesce,
13685         .get_sset_count         = tg3_get_sset_count,
13686         .get_rxnfc              = tg3_get_rxnfc,
13687         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13688         .get_rxfh_indir         = tg3_get_rxfh_indir,
13689         .set_rxfh_indir         = tg3_set_rxfh_indir,
13690         .get_channels           = tg3_get_channels,
13691         .set_channels           = tg3_set_channels,
13692         .get_ts_info            = tg3_get_ts_info,
13693 };
13694
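/* Counters are read under tp->lock while the device is up; once
 * hw_stats is gone (device closed), the last saved snapshot in
 * net_stats_prev is returned instead.
 */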
13695 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13696                                                 struct rtnl_link_stats64 *stats)
13697 {
13698         struct tg3 *tp = netdev_priv(dev);
13699
13700         spin_lock_bh(&tp->lock);
13701         if (!tp->hw_stats) {
13702                 spin_unlock_bh(&tp->lock);
13703                 return &tp->net_stats_prev;
13704         }
13705
13706         tg3_get_nstats(tp, stats);
13707         spin_unlock_bh(&tp->lock);
13708
13709         return stats;
13710 }
13711
13712 static void tg3_set_rx_mode(struct net_device *dev)
13713 {
13714         struct tg3 *tp = netdev_priv(dev);
13715
13716         if (!netif_running(dev))
13717                 return;
13718
13719         tg3_full_lock(tp, 0);
13720         __tg3_set_rx_mode(dev);
13721         tg3_full_unlock(tp);
13722 }
13723
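/* On 5780-class parts jumbo frames and TSO share resources, so raising
 * the MTU above the standard Ethernet payload size trades TSO away
 * rather than enabling the jumbo ring; all other parts simply toggle
 * the jumbo ring flag.
 */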
13724 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13725                                int new_mtu)
13726 {
13727         dev->mtu = new_mtu;
13728
13729         if (new_mtu > ETH_DATA_LEN) {
13730                 if (tg3_flag(tp, 5780_CLASS)) {
13731                         netdev_update_features(dev);
13732                         tg3_flag_clear(tp, TSO_CAPABLE);
13733                 } else {
13734                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13735                 }
13736         } else {
13737                 if (tg3_flag(tp, 5780_CLASS)) {
13738                         tg3_flag_set(tp, TSO_CAPABLE);
13739                         netdev_update_features(dev);
13740                 }
13741                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13742         }
13743 }
13744
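/* Changing the MTU on a running interface requires a full chip halt and
 * restart.  From user space this corresponds to something like
 * "ip link set dev eth0 mtu 9000" (name and value for illustration).
 */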
13745 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13746 {
13747         struct tg3 *tp = netdev_priv(dev);
13748         int err;
13749         bool reset_phy = false;
13750
13751         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13752                 return -EINVAL;
13753
13754         if (!netif_running(dev)) {
13755                 /* We'll just catch it later when the
13756                  * device is brought up.
13757                  */
13758                 tg3_set_mtu(dev, tp, new_mtu);
13759                 return 0;
13760         }
13761
13762         tg3_phy_stop(tp);
13763
13764         tg3_netif_stop(tp);
13765
13766         tg3_full_lock(tp, 1);
13767
13768         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13769
13770         tg3_set_mtu(dev, tp, new_mtu);
13771
13772         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13773          * breaks all requests to 256 bytes.
13774          */
13775         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13776                 reset_phy = true;
13777
13778         err = tg3_restart_hw(tp, reset_phy);
13779
13780         if (!err)
13781                 tg3_netif_start(tp);
13782
13783         tg3_full_unlock(tp);
13784
13785         if (!err)
13786                 tg3_phy_start(tp);
13787
13788         return err;
13789 }
13790
13791 static const struct net_device_ops tg3_netdev_ops = {
13792         .ndo_open               = tg3_open,
13793         .ndo_stop               = tg3_close,
13794         .ndo_start_xmit         = tg3_start_xmit,
13795         .ndo_get_stats64        = tg3_get_stats64,
13796         .ndo_validate_addr      = eth_validate_addr,
13797         .ndo_set_rx_mode        = tg3_set_rx_mode,
13798         .ndo_set_mac_address    = tg3_set_mac_addr,
13799         .ndo_do_ioctl           = tg3_ioctl,
13800         .ndo_tx_timeout         = tg3_tx_timeout,
13801         .ndo_change_mtu         = tg3_change_mtu,
13802         .ndo_fix_features       = tg3_fix_features,
13803         .ndo_set_features       = tg3_set_features,
13804 #ifdef CONFIG_NET_POLL_CONTROLLER
13805         .ndo_poll_controller    = tg3_poll_controller,
13806 #endif
13807 };
13808
13809 static void tg3_get_eeprom_size(struct tg3 *tp)
13810 {
13811         u32 cursize, val, magic;
13812
13813         tp->nvram_size = EEPROM_CHIP_SIZE;
13814
13815         if (tg3_nvram_read(tp, 0, &magic) != 0)
13816                 return;
13817
13818         if ((magic != TG3_EEPROM_MAGIC) &&
13819             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13820             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13821                 return;
13822
13823         /*
13824          * Size the chip by reading offsets at increasing powers of two.
13825          * When we encounter our validation signature, we know the addressing
13826          * has wrapped around, and thus have our chip size.
13827          */
13828         cursize = 0x10;
13829
13830         while (cursize < tp->nvram_size) {
13831                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13832                         return;
13833
13834                 if (val == magic)
13835                         break;
13836
13837                 cursize <<= 1;
13838         }
13839
13840         tp->nvram_size = cursize;
13841 }
13842
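/* For standard-format images the NVRAM size is stored as a 16-bit
 * kilobyte count at offset 0xf2 (read here as part of the word at
 * 0xf0); selfboot images fall back to the EEPROM sizing probe above,
 * and a zero count means the default 512KB part.
 */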
13843 static void tg3_get_nvram_size(struct tg3 *tp)
13844 {
13845         u32 val;
13846
13847         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13848                 return;
13849
13850         /* Selfboot format */
13851         if (val != TG3_EEPROM_MAGIC) {
13852                 tg3_get_eeprom_size(tp);
13853                 return;
13854         }
13855
13856         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13857                 if (val != 0) {
13858                         /* This is confusing.  We want to operate on the
13859                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13860                          * call will read from NVRAM and byteswap the data
13861                          * according to the byteswapping settings for all
13862                          * other register accesses.  This ensures the data we
13863                          * want will always reside in the lower 16-bits.
13864                          * However, the data in NVRAM is in LE format, which
13865                          * means the data from the NVRAM read will always be
13866                          * opposite the endianness of the CPU.  The 16-bit
13867                          * byteswap then brings the data to CPU endianness.
13868                          */
13869                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13870                         return;
13871                 }
13872         }
13873         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13874 }
13875
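/* Decode NVRAM_CFG1 for the pre-5752 families: select the flash
 * interface mode, then map the vendor field to a JEDEC id, page size,
 * and buffering for 5750/5780-class chips; everything else defaults to
 * a buffered Atmel part.
 */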
13876 static void tg3_get_nvram_info(struct tg3 *tp)
13877 {
13878         u32 nvcfg1;
13879
13880         nvcfg1 = tr32(NVRAM_CFG1);
13881         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13882                 tg3_flag_set(tp, FLASH);
13883         } else {
13884                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13885                 tw32(NVRAM_CFG1, nvcfg1);
13886         }
13887
13888         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13889             tg3_flag(tp, 5780_CLASS)) {
13890                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13891                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13892                         tp->nvram_jedecnum = JEDEC_ATMEL;
13893                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13894                         tg3_flag_set(tp, NVRAM_BUFFERED);
13895                         break;
13896                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13897                         tp->nvram_jedecnum = JEDEC_ATMEL;
13898                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13899                         break;
13900                 case FLASH_VENDOR_ATMEL_EEPROM:
13901                         tp->nvram_jedecnum = JEDEC_ATMEL;
13902                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13903                         tg3_flag_set(tp, NVRAM_BUFFERED);
13904                         break;
13905                 case FLASH_VENDOR_ST:
13906                         tp->nvram_jedecnum = JEDEC_ST;
13907                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13908                         tg3_flag_set(tp, NVRAM_BUFFERED);
13909                         break;
13910                 case FLASH_VENDOR_SAIFUN:
13911                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13912                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13913                         break;
13914                 case FLASH_VENDOR_SST_SMALL:
13915                 case FLASH_VENDOR_SST_LARGE:
13916                         tp->nvram_jedecnum = JEDEC_SST;
13917                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13918                         break;
13919                 }
13920         } else {
13921                 tp->nvram_jedecnum = JEDEC_ATMEL;
13922                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13923                 tg3_flag_set(tp, NVRAM_BUFFERED);
13924         }
13925 }
13926
13927 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13928 {
13929         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13930         case FLASH_5752PAGE_SIZE_256:
13931                 tp->nvram_pagesize = 256;
13932                 break;
13933         case FLASH_5752PAGE_SIZE_512:
13934                 tp->nvram_pagesize = 512;
13935                 break;
13936         case FLASH_5752PAGE_SIZE_1K:
13937                 tp->nvram_pagesize = 1024;
13938                 break;
13939         case FLASH_5752PAGE_SIZE_2K:
13940                 tp->nvram_pagesize = 2048;
13941                 break;
13942         case FLASH_5752PAGE_SIZE_4K:
13943                 tp->nvram_pagesize = 4096;
13944                 break;
13945         case FLASH_5752PAGE_SIZE_264:
13946                 tp->nvram_pagesize = 264;
13947                 break;
13948         case FLASH_5752PAGE_SIZE_528:
13949                 tp->nvram_pagesize = 528;
13950                 break;
13951         }
13952 }
13953
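/* The tg3_get_57xx_nvram_info() variants below decode NVRAM_CFG1 for
 * one chip family each: bit 27 flags TPM-protected NVRAM, and the
 * vendor field selects the JEDEC id, buffering, flash-vs-EEPROM mode,
 * page size and, where encoded, the part size.
 */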
13954 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13955 {
13956         u32 nvcfg1;
13957
13958         nvcfg1 = tr32(NVRAM_CFG1);
13959
13960         /* NVRAM protection for TPM */
13961         if (nvcfg1 & (1 << 27))
13962                 tg3_flag_set(tp, PROTECTED_NVRAM);
13963
13964         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13965         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13966         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13967                 tp->nvram_jedecnum = JEDEC_ATMEL;
13968                 tg3_flag_set(tp, NVRAM_BUFFERED);
13969                 break;
13970         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13971                 tp->nvram_jedecnum = JEDEC_ATMEL;
13972                 tg3_flag_set(tp, NVRAM_BUFFERED);
13973                 tg3_flag_set(tp, FLASH);
13974                 break;
13975         case FLASH_5752VENDOR_ST_M45PE10:
13976         case FLASH_5752VENDOR_ST_M45PE20:
13977         case FLASH_5752VENDOR_ST_M45PE40:
13978                 tp->nvram_jedecnum = JEDEC_ST;
13979                 tg3_flag_set(tp, NVRAM_BUFFERED);
13980                 tg3_flag_set(tp, FLASH);
13981                 break;
13982         }
13983
13984         if (tg3_flag(tp, FLASH)) {
13985                 tg3_nvram_get_pagesize(tp, nvcfg1);
13986         } else {
13987                 /* For eeprom, set pagesize to maximum eeprom size */
13988                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13989
13990                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13991                 tw32(NVRAM_CFG1, nvcfg1);
13992         }
13993 }
13994
13995 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13996 {
13997         u32 nvcfg1, protect = 0;
13998
13999         nvcfg1 = tr32(NVRAM_CFG1);
14000
14001         /* NVRAM protection for TPM */
14002         if (nvcfg1 & (1 << 27)) {
14003                 tg3_flag_set(tp, PROTECTED_NVRAM);
14004                 protect = 1;
14005         }
14006
14007         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14008         switch (nvcfg1) {
14009         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14010         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14011         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14012         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14013                 tp->nvram_jedecnum = JEDEC_ATMEL;
14014                 tg3_flag_set(tp, NVRAM_BUFFERED);
14015                 tg3_flag_set(tp, FLASH);
14016                 tp->nvram_pagesize = 264;
14017                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14018                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14019                         tp->nvram_size = (protect ? 0x3e200 :
14020                                           TG3_NVRAM_SIZE_512KB);
14021                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14022                         tp->nvram_size = (protect ? 0x1f200 :
14023                                           TG3_NVRAM_SIZE_256KB);
14024                 else
14025                         tp->nvram_size = (protect ? 0x1f200 :
14026                                           TG3_NVRAM_SIZE_128KB);
14027                 break;
14028         case FLASH_5752VENDOR_ST_M45PE10:
14029         case FLASH_5752VENDOR_ST_M45PE20:
14030         case FLASH_5752VENDOR_ST_M45PE40:
14031                 tp->nvram_jedecnum = JEDEC_ST;
14032                 tg3_flag_set(tp, NVRAM_BUFFERED);
14033                 tg3_flag_set(tp, FLASH);
14034                 tp->nvram_pagesize = 256;
14035                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14036                         tp->nvram_size = (protect ?
14037                                           TG3_NVRAM_SIZE_64KB :
14038                                           TG3_NVRAM_SIZE_128KB);
14039                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14040                         tp->nvram_size = (protect ?
14041                                           TG3_NVRAM_SIZE_64KB :
14042                                           TG3_NVRAM_SIZE_256KB);
14043                 else
14044                         tp->nvram_size = (protect ?
14045                                           TG3_NVRAM_SIZE_128KB :
14046                                           TG3_NVRAM_SIZE_512KB);
14047                 break;
14048         }
14049 }
14050
14051 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14052 {
14053         u32 nvcfg1;
14054
14055         nvcfg1 = tr32(NVRAM_CFG1);
14056
14057         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14058         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14059         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14060         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14061         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14062                 tp->nvram_jedecnum = JEDEC_ATMEL;
14063                 tg3_flag_set(tp, NVRAM_BUFFERED);
14064                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14065
14066                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14067                 tw32(NVRAM_CFG1, nvcfg1);
14068                 break;
14069         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14070         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14071         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14072         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14073                 tp->nvram_jedecnum = JEDEC_ATMEL;
14074                 tg3_flag_set(tp, NVRAM_BUFFERED);
14075                 tg3_flag_set(tp, FLASH);
14076                 tp->nvram_pagesize = 264;
14077                 break;
14078         case FLASH_5752VENDOR_ST_M45PE10:
14079         case FLASH_5752VENDOR_ST_M45PE20:
14080         case FLASH_5752VENDOR_ST_M45PE40:
14081                 tp->nvram_jedecnum = JEDEC_ST;
14082                 tg3_flag_set(tp, NVRAM_BUFFERED);
14083                 tg3_flag_set(tp, FLASH);
14084                 tp->nvram_pagesize = 256;
14085                 break;
14086         }
14087 }
14088
14089 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14090 {
14091         u32 nvcfg1, protect = 0;
14092
14093         nvcfg1 = tr32(NVRAM_CFG1);
14094
14095         /* NVRAM protection for TPM */
14096         if (nvcfg1 & (1 << 27)) {
14097                 tg3_flag_set(tp, PROTECTED_NVRAM);
14098                 protect = 1;
14099         }
14100
14101         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14102         switch (nvcfg1) {
14103         case FLASH_5761VENDOR_ATMEL_ADB021D:
14104         case FLASH_5761VENDOR_ATMEL_ADB041D:
14105         case FLASH_5761VENDOR_ATMEL_ADB081D:
14106         case FLASH_5761VENDOR_ATMEL_ADB161D:
14107         case FLASH_5761VENDOR_ATMEL_MDB021D:
14108         case FLASH_5761VENDOR_ATMEL_MDB041D:
14109         case FLASH_5761VENDOR_ATMEL_MDB081D:
14110         case FLASH_5761VENDOR_ATMEL_MDB161D:
14111                 tp->nvram_jedecnum = JEDEC_ATMEL;
14112                 tg3_flag_set(tp, NVRAM_BUFFERED);
14113                 tg3_flag_set(tp, FLASH);
14114                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14115                 tp->nvram_pagesize = 256;
14116                 break;
14117         case FLASH_5761VENDOR_ST_A_M45PE20:
14118         case FLASH_5761VENDOR_ST_A_M45PE40:
14119         case FLASH_5761VENDOR_ST_A_M45PE80:
14120         case FLASH_5761VENDOR_ST_A_M45PE16:
14121         case FLASH_5761VENDOR_ST_M_M45PE20:
14122         case FLASH_5761VENDOR_ST_M_M45PE40:
14123         case FLASH_5761VENDOR_ST_M_M45PE80:
14124         case FLASH_5761VENDOR_ST_M_M45PE16:
14125                 tp->nvram_jedecnum = JEDEC_ST;
14126                 tg3_flag_set(tp, NVRAM_BUFFERED);
14127                 tg3_flag_set(tp, FLASH);
14128                 tp->nvram_pagesize = 256;
14129                 break;
14130         }
14131
14132         if (protect) {
14133                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14134         } else {
14135                 switch (nvcfg1) {
14136                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14137                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14138                 case FLASH_5761VENDOR_ST_A_M45PE16:
14139                 case FLASH_5761VENDOR_ST_M_M45PE16:
14140                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14141                         break;
14142                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14143                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14144                 case FLASH_5761VENDOR_ST_A_M45PE80:
14145                 case FLASH_5761VENDOR_ST_M_M45PE80:
14146                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14147                         break;
14148                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14149                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14150                 case FLASH_5761VENDOR_ST_A_M45PE40:
14151                 case FLASH_5761VENDOR_ST_M_M45PE40:
14152                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14153                         break;
14154                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14155                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14156                 case FLASH_5761VENDOR_ST_A_M45PE20:
14157                 case FLASH_5761VENDOR_ST_M_M45PE20:
14158                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14159                         break;
14160                 }
14161         }
14162 }
14163
14164 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14165 {
14166         tp->nvram_jedecnum = JEDEC_ATMEL;
14167         tg3_flag_set(tp, NVRAM_BUFFERED);
14168         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14169 }
14170
14171 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14172 {
14173         u32 nvcfg1;
14174
14175         nvcfg1 = tr32(NVRAM_CFG1);
14176
14177         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14178         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14179         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14180                 tp->nvram_jedecnum = JEDEC_ATMEL;
14181                 tg3_flag_set(tp, NVRAM_BUFFERED);
14182                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14183
14184                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14185                 tw32(NVRAM_CFG1, nvcfg1);
14186                 return;
14187         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14188         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14189         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14190         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14191         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14192         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14193         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14194                 tp->nvram_jedecnum = JEDEC_ATMEL;
14195                 tg3_flag_set(tp, NVRAM_BUFFERED);
14196                 tg3_flag_set(tp, FLASH);
14197
14198                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14199                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14200                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14201                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14202                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14203                         break;
14204                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14205                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14206                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14207                         break;
14208                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14209                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14210                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14211                         break;
14212                 }
14213                 break;
14214         case FLASH_5752VENDOR_ST_M45PE10:
14215         case FLASH_5752VENDOR_ST_M45PE20:
14216         case FLASH_5752VENDOR_ST_M45PE40:
14217                 tp->nvram_jedecnum = JEDEC_ST;
14218                 tg3_flag_set(tp, NVRAM_BUFFERED);
14219                 tg3_flag_set(tp, FLASH);
14220
14221                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14222                 case FLASH_5752VENDOR_ST_M45PE10:
14223                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14224                         break;
14225                 case FLASH_5752VENDOR_ST_M45PE20:
14226                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14227                         break;
14228                 case FLASH_5752VENDOR_ST_M45PE40:
14229                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14230                         break;
14231                 }
14232                 break;
14233         default:
14234                 tg3_flag_set(tp, NO_NVRAM);
14235                 return;
14236         }
14237
14238         tg3_nvram_get_pagesize(tp, nvcfg1);
14239         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14240                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14241 }
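/* The 264/528-byte page sizes checked above are the native,
 * non-power-of-two pages of Atmel AT45DB DataFlash parts, which need
 * the driver's page-based address translation; any other page size
 * is a plain power of two, so NO_NVRAM_ADDR_TRANS is set instead.
 */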
14242
14243
14244 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14245 {
14246         u32 nvcfg1;
14247
14248         nvcfg1 = tr32(NVRAM_CFG1);
14249
14250         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14251         case FLASH_5717VENDOR_ATMEL_EEPROM:
14252         case FLASH_5717VENDOR_MICRO_EEPROM:
14253                 tp->nvram_jedecnum = JEDEC_ATMEL;
14254                 tg3_flag_set(tp, NVRAM_BUFFERED);
14255                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14256
14257                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14258                 tw32(NVRAM_CFG1, nvcfg1);
14259                 return;
14260         case FLASH_5717VENDOR_ATMEL_MDB011D:
14261         case FLASH_5717VENDOR_ATMEL_ADB011B:
14262         case FLASH_5717VENDOR_ATMEL_ADB011D:
14263         case FLASH_5717VENDOR_ATMEL_MDB021D:
14264         case FLASH_5717VENDOR_ATMEL_ADB021B:
14265         case FLASH_5717VENDOR_ATMEL_ADB021D:
14266         case FLASH_5717VENDOR_ATMEL_45USPT:
14267                 tp->nvram_jedecnum = JEDEC_ATMEL;
14268                 tg3_flag_set(tp, NVRAM_BUFFERED);
14269                 tg3_flag_set(tp, FLASH);
14270
14271                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14272                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14273                         /* Detect size with tg3_get_nvram_size() */
14274                         break;
14275                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14276                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14277                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14278                         break;
14279                 default:
14280                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14281                         break;
14282                 }
14283                 break;
14284         case FLASH_5717VENDOR_ST_M_M25PE10:
14285         case FLASH_5717VENDOR_ST_A_M25PE10:
14286         case FLASH_5717VENDOR_ST_M_M45PE10:
14287         case FLASH_5717VENDOR_ST_A_M45PE10:
14288         case FLASH_5717VENDOR_ST_M_M25PE20:
14289         case FLASH_5717VENDOR_ST_A_M25PE20:
14290         case FLASH_5717VENDOR_ST_M_M45PE20:
14291         case FLASH_5717VENDOR_ST_A_M45PE20:
14292         case FLASH_5717VENDOR_ST_25USPT:
14293         case FLASH_5717VENDOR_ST_45USPT:
14294                 tp->nvram_jedecnum = JEDEC_ST;
14295                 tg3_flag_set(tp, NVRAM_BUFFERED);
14296                 tg3_flag_set(tp, FLASH);
14297
14298                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14299                 case FLASH_5717VENDOR_ST_M_M25PE20:
14300                 case FLASH_5717VENDOR_ST_M_M45PE20:
14301                         /* Detect size with tg3_get_nvram_size() */
14302                         break;
14303                 case FLASH_5717VENDOR_ST_A_M25PE20:
14304                 case FLASH_5717VENDOR_ST_A_M45PE20:
14305                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14306                         break;
14307                 default:
14308                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14309                         break;
14310                 }
14311                 break;
14312         default:
14313                 tg3_flag_set(tp, NO_NVRAM);
14314                 return;
14315         }
14316
14317         tg3_nvram_get_pagesize(tp, nvcfg1);
14318         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14319                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14320 }
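/* Where a pinstrap above leaves nvram_size at 0 ("Detect size with
 * tg3_get_nvram_size()"), the strap can apparently be fitted with
 * more than one flash density, so tg3_nvram_init() fills the size
 * in later from the image itself.
 */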
14321
14322 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14323 {
14324         u32 nvcfg1, nvmpinstrp;
14325
14326         nvcfg1 = tr32(NVRAM_CFG1);
14327         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14328
14329         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14330                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14331                         tg3_flag_set(tp, NO_NVRAM);
14332                         return;
14333                 }
14334
14335                 switch (nvmpinstrp) {
14336                 case FLASH_5762_EEPROM_HD:
14337                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14338                         break;
14339                 case FLASH_5762_EEPROM_LD:
14340                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14341                         break;
14342                 case FLASH_5720VENDOR_M_ST_M45PE20:
14343                         /* This pinstrap supports multiple sizes, so force it
14344                          * to read the actual size from location 0xf0.
14345                          */
14346                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14347                         break;
14348                 }
14349         }
14350
14351         switch (nvmpinstrp) {
14352         case FLASH_5720_EEPROM_HD:
14353         case FLASH_5720_EEPROM_LD:
14354                 tp->nvram_jedecnum = JEDEC_ATMEL;
14355                 tg3_flag_set(tp, NVRAM_BUFFERED);
14356
14357                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14358                 tw32(NVRAM_CFG1, nvcfg1);
14359                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14360                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14361                 else
14362                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14363                 return;
14364         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14365         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14366         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14367         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14368         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14369         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14370         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14371         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14372         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14373         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14374         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14375         case FLASH_5720VENDOR_ATMEL_45USPT:
14376                 tp->nvram_jedecnum = JEDEC_ATMEL;
14377                 tg3_flag_set(tp, NVRAM_BUFFERED);
14378                 tg3_flag_set(tp, FLASH);
14379
14380                 switch (nvmpinstrp) {
14381                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14382                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14383                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14384                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14385                         break;
14386                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14387                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14388                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14389                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14390                         break;
14391                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14392                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14393                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14394                         break;
14395                 default:
14396                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14397                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14398                         break;
14399                 }
14400                 break;
14401         case FLASH_5720VENDOR_M_ST_M25PE10:
14402         case FLASH_5720VENDOR_M_ST_M45PE10:
14403         case FLASH_5720VENDOR_A_ST_M25PE10:
14404         case FLASH_5720VENDOR_A_ST_M45PE10:
14405         case FLASH_5720VENDOR_M_ST_M25PE20:
14406         case FLASH_5720VENDOR_M_ST_M45PE20:
14407         case FLASH_5720VENDOR_A_ST_M25PE20:
14408         case FLASH_5720VENDOR_A_ST_M45PE20:
14409         case FLASH_5720VENDOR_M_ST_M25PE40:
14410         case FLASH_5720VENDOR_M_ST_M45PE40:
14411         case FLASH_5720VENDOR_A_ST_M25PE40:
14412         case FLASH_5720VENDOR_A_ST_M45PE40:
14413         case FLASH_5720VENDOR_M_ST_M25PE80:
14414         case FLASH_5720VENDOR_M_ST_M45PE80:
14415         case FLASH_5720VENDOR_A_ST_M25PE80:
14416         case FLASH_5720VENDOR_A_ST_M45PE80:
14417         case FLASH_5720VENDOR_ST_25USPT:
14418         case FLASH_5720VENDOR_ST_45USPT:
14419                 tp->nvram_jedecnum = JEDEC_ST;
14420                 tg3_flag_set(tp, NVRAM_BUFFERED);
14421                 tg3_flag_set(tp, FLASH);
14422
14423                 switch (nvmpinstrp) {
14424                 case FLASH_5720VENDOR_M_ST_M25PE20:
14425                 case FLASH_5720VENDOR_M_ST_M45PE20:
14426                 case FLASH_5720VENDOR_A_ST_M25PE20:
14427                 case FLASH_5720VENDOR_A_ST_M45PE20:
14428                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14429                         break;
14430                 case FLASH_5720VENDOR_M_ST_M25PE40:
14431                 case FLASH_5720VENDOR_M_ST_M45PE40:
14432                 case FLASH_5720VENDOR_A_ST_M25PE40:
14433                 case FLASH_5720VENDOR_A_ST_M45PE40:
14434                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14435                         break;
14436                 case FLASH_5720VENDOR_M_ST_M25PE80:
14437                 case FLASH_5720VENDOR_M_ST_M45PE80:
14438                 case FLASH_5720VENDOR_A_ST_M25PE80:
14439                 case FLASH_5720VENDOR_A_ST_M45PE80:
14440                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14441                         break;
14442                 default:
14443                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14444                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14445                         break;
14446                 }
14447                 break;
14448         default:
14449                 tg3_flag_set(tp, NO_NVRAM);
14450                 return;
14451         }
14452
14453         tg3_nvram_get_pagesize(tp, nvcfg1);
14454         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14455                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14456
14457         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14458                 u32 val;
14459
14460                 if (tg3_nvram_read(tp, 0, &val))
14461                         return;
14462
14463                 if (val != TG3_EEPROM_MAGIC &&
14464                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14465                         tg3_flag_set(tp, NO_NVRAM);
14466         }
14467 }
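/* On the 5762 the NVRAM contents get an extra sanity check: word 0
 * must carry either the standard image magic or a firmware-style
 * magic in its masked bits, otherwise the device is treated as
 * having no usable NVRAM at all.
 */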
14468
14469 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14470 static void tg3_nvram_init(struct tg3 *tp)
14471 {
14472         if (tg3_flag(tp, IS_SSB_CORE)) {
14473                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14474                 tg3_flag_clear(tp, NVRAM);
14475                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14476                 tg3_flag_set(tp, NO_NVRAM);
14477                 return;
14478         }
14479
14480         tw32_f(GRC_EEPROM_ADDR,
14481              (EEPROM_ADDR_FSM_RESET |
14482               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14483                EEPROM_ADDR_CLKPERD_SHIFT)));
14484
14485         msleep(1);
14486
14487         /* Enable serial eeprom (seeprom) accesses. */
14488         tw32_f(GRC_LOCAL_CTRL,
14489              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14490         udelay(100);
14491
14492         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14493             tg3_asic_rev(tp) != ASIC_REV_5701) {
14494                 tg3_flag_set(tp, NVRAM);
14495
14496                 if (tg3_nvram_lock(tp)) {
14497                         netdev_warn(tp->dev,
14498                                     "Cannot get nvram lock, %s failed\n",
14499                                     __func__);
14500                         return;
14501                 }
14502                 tg3_enable_nvram_access(tp);
14503
14504                 tp->nvram_size = 0;
14505
14506                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14507                         tg3_get_5752_nvram_info(tp);
14508                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14509                         tg3_get_5755_nvram_info(tp);
14510                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14511                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14512                          tg3_asic_rev(tp) == ASIC_REV_5785)
14513                         tg3_get_5787_nvram_info(tp);
14514                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14515                         tg3_get_5761_nvram_info(tp);
14516                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14517                         tg3_get_5906_nvram_info(tp);
14518                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14519                          tg3_flag(tp, 57765_CLASS))
14520                         tg3_get_57780_nvram_info(tp);
14521                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14522                          tg3_asic_rev(tp) == ASIC_REV_5719)
14523                         tg3_get_5717_nvram_info(tp);
14524                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14525                          tg3_asic_rev(tp) == ASIC_REV_5762)
14526                         tg3_get_5720_nvram_info(tp);
14527                 else
14528                         tg3_get_nvram_info(tp);
14529
14530                 if (tp->nvram_size == 0)
14531                         tg3_get_nvram_size(tp);
14532
14533                 tg3_disable_nvram_access(tp);
14534                 tg3_nvram_unlock(tp);
14535
14536         } else {
14537                 tg3_flag_clear(tp, NVRAM);
14538                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14539
14540                 tg3_get_eeprom_size(tp);
14541         }
14542 }
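/* Rough shape of the probe above: everything newer than 5700/5701
 * gets a chip-specific tg3_get_*_nvram_info() decoder keyed off
 * NVRAM_CFG1, the two oldest chips fall back to direct EEPROM
 * sizing, and any decoder that leaves nvram_size at 0 defers to
 * tg3_get_nvram_size().
 */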
14543
14544 struct subsys_tbl_ent {
14545         u16 subsys_vendor, subsys_devid;
14546         u32 phy_id;
14547 };
14548
14549 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14550         /* Broadcom boards. */
14551         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14552           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14553         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14554           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14555         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14556           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14557         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14558           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14559         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14560           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14561         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14562           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14563         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14564           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14565         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14566           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14567         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14568           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14569         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14570           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14571         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14572           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14573
14574         /* 3com boards. */
14575         { TG3PCI_SUBVENDOR_ID_3COM,
14576           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14577         { TG3PCI_SUBVENDOR_ID_3COM,
14578           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14579         { TG3PCI_SUBVENDOR_ID_3COM,
14580           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14581         { TG3PCI_SUBVENDOR_ID_3COM,
14582           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14583         { TG3PCI_SUBVENDOR_ID_3COM,
14584           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14585
14586         /* DELL boards. */
14587         { TG3PCI_SUBVENDOR_ID_DELL,
14588           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14589         { TG3PCI_SUBVENDOR_ID_DELL,
14590           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14591         { TG3PCI_SUBVENDOR_ID_DELL,
14592           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14593         { TG3PCI_SUBVENDOR_ID_DELL,
14594           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14595
14596         /* Compaq boards. */
14597         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14598           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14599         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14600           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14601         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14602           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14603         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14604           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14605         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14606           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14607
14608         /* IBM boards. */
14609         { TG3PCI_SUBVENDOR_ID_IBM,
14610           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14611 };
14612
14613 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14614 {
14615         int i;
14616
14617         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14618                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14619                      tp->pdev->subsystem_vendor) &&
14620                     (subsys_id_to_phy_id[i].subsys_devid ==
14621                      tp->pdev->subsystem_device))
14622                         return &subsys_id_to_phy_id[i];
14623         }
14624         return NULL;
14625 }
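/* This lookup is a last resort used by tg3_phy_probe(): it only
 * matters for boards that provide neither a readable PHY ID nor an
 * EEPROM-supplied one, and an entry with a phy_id of 0 denotes a
 * SerDes board.  Illustrative use (mirroring tg3_phy_probe() below):
 *
 *	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);
 *	if (p)
 *		tp->phy_id = p->phy_id;
 */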
14626
14627 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14628 {
14629         u32 val;
14630
14631         tp->phy_id = TG3_PHY_ID_INVALID;
14632         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14633
14634         /* Assume an onboard, WOL-capable device by default. */
14635         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14636         tg3_flag_set(tp, WOL_CAP);
14637
14638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14639                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14640                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14641                         tg3_flag_set(tp, IS_NIC);
14642                 }
14643                 val = tr32(VCPU_CFGSHDW);
14644                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14645                         tg3_flag_set(tp, ASPM_WORKAROUND);
14646                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14647                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14648                         tg3_flag_set(tp, WOL_ENABLE);
14649                         device_set_wakeup_enable(&tp->pdev->dev, true);
14650                 }
14651                 goto done;
14652         }
14653
14654         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14655         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14656                 u32 nic_cfg, led_cfg;
14657                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14658                 int eeprom_phy_serdes = 0;
14659
14660                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14661                 tp->nic_sram_data_cfg = nic_cfg;
14662
14663                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14664                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14665                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14666                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14667                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14668                     (ver > 0) && (ver < 0x100))
14669                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14670
14671                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14672                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14673
14674                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14675                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14676                         eeprom_phy_serdes = 1;
14677
14678                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14679                 if (nic_phy_id != 0) {
14680                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14681                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14682
14683                         eeprom_phy_id  = (id1 >> 16) << 10;
14684                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14685                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14686                 } else
14687                         eeprom_phy_id = 0;
14688
14689                 tp->phy_id = eeprom_phy_id;
14690                 if (eeprom_phy_serdes) {
14691                         if (!tg3_flag(tp, 5705_PLUS))
14692                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14693                         else
14694                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14695                 }
14696
14697                 if (tg3_flag(tp, 5750_PLUS))
14698                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14699                                     SHASTA_EXT_LED_MODE_MASK);
14700                 else
14701                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14702
14703                 switch (led_cfg) {
14704                 default:
14705                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14706                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14707                         break;
14708
14709                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14710                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14711                         break;
14712
14713                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14714                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14715
14716                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14717                          * read, as with some older 5700/5701 bootcode.
14718                          */
14719                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14720                             tg3_asic_rev(tp) == ASIC_REV_5701)
14721                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14722
14723                         break;
14724
14725                 case SHASTA_EXT_LED_SHARED:
14726                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14727                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14728                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14729                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14730                                                  LED_CTRL_MODE_PHY_2);
14731                         break;
14732
14733                 case SHASTA_EXT_LED_MAC:
14734                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14735                         break;
14736
14737                 case SHASTA_EXT_LED_COMBO:
14738                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14739                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14740                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14741                                                  LED_CTRL_MODE_PHY_2);
14742                         break;
14743
14744                 }
14745
14746                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14747                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
14748                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14749                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14750
14751                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14752                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14753
14754                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14755                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14756                         if ((tp->pdev->subsystem_vendor ==
14757                              PCI_VENDOR_ID_ARIMA) &&
14758                             (tp->pdev->subsystem_device == 0x205a ||
14759                              tp->pdev->subsystem_device == 0x2063))
14760                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14761                 } else {
14762                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14763                         tg3_flag_set(tp, IS_NIC);
14764                 }
14765
14766                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14767                         tg3_flag_set(tp, ENABLE_ASF);
14768                         if (tg3_flag(tp, 5750_PLUS))
14769                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14770                 }
14771
14772                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14773                     tg3_flag(tp, 5750_PLUS))
14774                         tg3_flag_set(tp, ENABLE_APE);
14775
14776                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14777                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14778                         tg3_flag_clear(tp, WOL_CAP);
14779
14780                 if (tg3_flag(tp, WOL_CAP) &&
14781                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14782                         tg3_flag_set(tp, WOL_ENABLE);
14783                         device_set_wakeup_enable(&tp->pdev->dev, true);
14784                 }
14785
14786                 if (cfg2 & (1 << 17))
14787                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14788
14789                 /* SerDes signal pre-emphasis in register 0x590 is set
14790                  * by the bootcode if bit 18 is set. */
14791                 if (cfg2 & (1 << 18))
14792                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14793
14794                 if ((tg3_flag(tp, 57765_PLUS) ||
14795                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14796                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14797                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14798                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14799
14800                 if (tg3_flag(tp, PCI_EXPRESS)) {
14801                         u32 cfg3;
14802
14803                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14804                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14805                             !tg3_flag(tp, 57765_PLUS) &&
14806                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14807                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14808                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14809                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14810                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14811                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14812                 }
14813
14814                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14815                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14816                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14817                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14818                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14819                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14820         }
14821 done:
14822         if (tg3_flag(tp, WOL_CAP))
14823                 device_set_wakeup_enable(&tp->pdev->dev,
14824                                          tg3_flag(tp, WOL_ENABLE));
14825         else
14826                 device_set_wakeup_capable(&tp->pdev->dev, false);
14827 }
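/* Whichever path was taken above, wakeup state is reconciled last:
 * WOL_CAP decides whether the device is wakeup-capable at all, while
 * WOL_ENABLE only selects the initial enable state.
 */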
14828
14829 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14830 {
14831         int i, err;
14832         u32 val2, off = offset * 8;
14833
14834         err = tg3_nvram_lock(tp);
14835         if (err)
14836                 return err;
14837
14838         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14839         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14840                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14841         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14842         udelay(10);
14843
14844         for (i = 0; i < 100; i++) {
14845                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14846                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14847                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14848                         break;
14849                 }
14850                 udelay(10);
14851         }
14852
14853         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14854
14855         tg3_nvram_unlock(tp);
14856         if (val2 & APE_OTP_STATUS_CMD_DONE)
14857                 return 0;
14858
14859         return -EBUSY;
14860 }
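/* The OTP read above polls APE_OTP_STATUS for roughly 1 ms
 * (100 iterations x 10 us) and returns -EBUSY if CMD_DONE never
 * appears; note the NVRAM lock is dropped on both outcomes.
 */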
14861
14862 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14863 {
14864         int i;
14865         u32 val;
14866
14867         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14868         tw32(OTP_CTRL, cmd);
14869
14870         /* Wait for up to 1 ms for command to execute. */
14871         for (i = 0; i < 100; i++) {
14872                 val = tr32(OTP_STATUS);
14873                 if (val & OTP_STATUS_CMD_DONE)
14874                         break;
14875                 udelay(10);
14876         }
14877
14878         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14879 }
14880
14881 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14882  * configuration is a 32-bit value that straddles the alignment boundary.
14883  * We do two 32-bit reads and then shift and merge the results.
14884  */
14885 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14886 {
14887         u32 bhalf_otp, thalf_otp;
14888
14889         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14890
14891         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14892                 return 0;
14893
14894         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14895
14896         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14897                 return 0;
14898
14899         thalf_otp = tr32(OTP_READ_DATA);
14900
14901         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14902
14903         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14904                 return 0;
14905
14906         bhalf_otp = tr32(OTP_READ_DATA);
14907
14908         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14909 }
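/* Merge example with illustrative values: for thalf_otp = 0xAAAABBBB
 * and bhalf_otp = 0xCCCCDDDD the function returns
 *
 *	((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16)
 *		= 0xBBBB0000 | 0x0000CCCC = 0xBBBBCCCC
 *
 * i.e. the low half of the first word glued to the high half of the
 * second.
 */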
14910
14911 static void tg3_phy_init_link_config(struct tg3 *tp)
14912 {
14913         u32 adv = ADVERTISED_Autoneg;
14914
14915         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14916                 adv |= ADVERTISED_1000baseT_Half |
14917                        ADVERTISED_1000baseT_Full;
14918
14919         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14920                 adv |= ADVERTISED_100baseT_Half |
14921                        ADVERTISED_100baseT_Full |
14922                        ADVERTISED_10baseT_Half |
14923                        ADVERTISED_10baseT_Full |
14924                        ADVERTISED_TP;
14925         else
14926                 adv |= ADVERTISED_FIBRE;
14927
14928         tp->link_config.advertising = adv;
14929         tp->link_config.speed = SPEED_UNKNOWN;
14930         tp->link_config.duplex = DUPLEX_UNKNOWN;
14931         tp->link_config.autoneg = AUTONEG_ENABLE;
14932         tp->link_config.active_speed = SPEED_UNKNOWN;
14933         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14934
14935         tp->old_link = -1;
14936 }
14937
14938 static int tg3_phy_probe(struct tg3 *tp)
14939 {
14940         u32 hw_phy_id_1, hw_phy_id_2;
14941         u32 hw_phy_id, hw_phy_id_masked;
14942         int err;
14943
14944         /* Flow control autonegotiation is the default behavior. */
14945         tg3_flag_set(tp, PAUSE_AUTONEG);
14946         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14947
14948         if (tg3_flag(tp, ENABLE_APE)) {
14949                 switch (tp->pci_fn) {
14950                 case 0:
14951                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14952                         break;
14953                 case 1:
14954                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14955                         break;
14956                 case 2:
14957                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14958                         break;
14959                 case 3:
14960                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14961                         break;
14962                 }
14963         }
14964
14965         if (!tg3_flag(tp, ENABLE_ASF) &&
14966             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14967             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14968                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14969                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14970
14971         if (tg3_flag(tp, USE_PHYLIB))
14972                 return tg3_phy_init(tp);
14973
14974         /* Reading the PHY ID register can conflict with ASF
14975          * firmware access to the PHY hardware.
14976          */
14977         err = 0;
14978         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14979                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14980         } else {
14981                 /* Now read the physical PHY_ID from the chip and verify
14982                  * that it is sane.  If it doesn't look good, we fall back
14983                  * to the PHY_ID already found in the eeprom area and,
14984                  * failing that, the hard-coded subsystem ID table.
14985                  */
14986                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14987                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14988
14989                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14990                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14991                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14992
14993                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14994         }
14995
14996         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14997                 tp->phy_id = hw_phy_id;
14998                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14999                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15000                 else
15001                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15002         } else {
15003                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15004                         /* Do nothing, phy ID already set up in
15005                          * tg3_get_eeprom_hw_cfg().
15006                          */
15007                 } else {
15008                         struct subsys_tbl_ent *p;
15009
15010                         /* No eeprom signature?  Try the hardcoded
15011                          * subsys device table.
15012                          */
15013                         p = tg3_lookup_by_subsys(tp);
15014                         if (p) {
15015                                 tp->phy_id = p->phy_id;
15016                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15017                                 /* So far the IDs 0xbc050cd0,
15018                                  * 0xbc050f80 and 0xbc050c30 have been
15019                                  * seen on devices connected to a
15020                                  * BCM4785, and there are probably more.
15021                                  * For now, assume any phy sitting on an
15022                                  * SSB core is supported.
15023                                  */
15024                                 return -ENODEV;
15025                         }
15026
15027                         if (!tp->phy_id ||
15028                             tp->phy_id == TG3_PHY_ID_BCM8002)
15029                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15030                 }
15031         }
15032
15033         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15034             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15035              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15036              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15037              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15038              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15039               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15040              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15041               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
15042                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15043
15044         tg3_phy_init_link_config(tp);
15045
15046         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15047             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15048             !tg3_flag(tp, ENABLE_APE) &&
15049             !tg3_flag(tp, ENABLE_ASF)) {
15050                 u32 bmsr, dummy;
15051
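                /* BMSR link status is latched-low, so it is read
                 * twice: the first read clears any stale latched
                 * link-down event, the second reflects reality.
                 */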
15052                 tg3_readphy(tp, MII_BMSR, &bmsr);
15053                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15054                     (bmsr & BMSR_LSTATUS))
15055                         goto skip_phy_reset;
15056
15057                 err = tg3_phy_reset(tp);
15058                 if (err)
15059                         return err;
15060
15061                 tg3_phy_set_wirespeed(tp);
15062
15063                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15064                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15065                                             tp->link_config.flowctrl);
15066
15067                         tg3_writephy(tp, MII_BMCR,
15068                                      BMCR_ANENABLE | BMCR_ANRESTART);
15069                 }
15070         }
15071
15072 skip_phy_reset:
15073         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15074                 err = tg3_init_5401phy_dsp(tp);
15075                 if (err)
15076                         return err;
15077
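                /* The 5401 DSP init runs twice here, seemingly on
                 * purpose; only the second attempt's result is
                 * propagated.
                 */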
15078                 err = tg3_init_5401phy_dsp(tp);
15079         }
15080
15081         return err;
15082 }
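/* PHY ID packing used above: bits 31:26 come from PHYSID2[15:10],
 * bits 25:10 from PHYSID1[15:0], and bits 9:0 from PHYSID2[9:0].
 * tg3_get_eeprom_hw_cfg() builds the same layout from NVRAM, so both
 * sources compare directly against the TG3_PHY_ID_* constants.
 */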
15083
15084 static void tg3_read_vpd(struct tg3 *tp)
15085 {
15086         u8 *vpd_data;
15087         unsigned int block_end, rosize, len;
15088         u32 vpdlen;
15089         int j, i = 0;
15090
15091         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15092         if (!vpd_data)
15093                 goto out_no_vpd;
15094
15095         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15096         if (i < 0)
15097                 goto out_not_found;
15098
15099         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15100         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15101         i += PCI_VPD_LRDT_TAG_SIZE;
15102
15103         if (block_end > vpdlen)
15104                 goto out_not_found;
15105
15106         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15107                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15108         if (j > 0) {
15109                 len = pci_vpd_info_field_size(&vpd_data[j]);
15110
15111                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15112                 if (j + len > block_end || len != 4 ||
15113                     memcmp(&vpd_data[j], "1028", 4))
15114                         goto partno;
15115
15116                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15117                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15118                 if (j < 0)
15119                         goto partno;
15120
15121                 len = pci_vpd_info_field_size(&vpd_data[j]);
15122
15123                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15124                 if (j + len > block_end)
15125                         goto partno;
15126
15127                 if (len >= sizeof(tp->fw_ver))
15128                         len = sizeof(tp->fw_ver) - 1;
15129                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15130                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15131                          &vpd_data[j]);
15132         }
15133
15134 partno:
15135         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15136                                       PCI_VPD_RO_KEYWORD_PARTNO);
15137         if (i < 0)
15138                 goto out_not_found;
15139
15140         len = pci_vpd_info_field_size(&vpd_data[i]);
15141
15142         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15143         if (len > TG3_BPN_SIZE ||
15144             (len + i) > vpdlen)
15145                 goto out_not_found;
15146
15147         memcpy(tp->board_part_number, &vpd_data[i], len);
15148
15149 out_not_found:
15150         kfree(vpd_data);
15151         if (tp->board_part_number[0])
15152                 return;
15153
15154 out_no_vpd:
15155         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15156                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15157                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15158                         strcpy(tp->board_part_number, "BCM5717");
15159                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15160                         strcpy(tp->board_part_number, "BCM5718");
15161                 else
15162                         goto nomatch;
15163         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15164                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15165                         strcpy(tp->board_part_number, "BCM57780");
15166                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15167                         strcpy(tp->board_part_number, "BCM57760");
15168                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15169                         strcpy(tp->board_part_number, "BCM57790");
15170                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15171                         strcpy(tp->board_part_number, "BCM57788");
15172                 else
15173                         goto nomatch;
15174         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15175                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15176                         strcpy(tp->board_part_number, "BCM57761");
15177                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15178                         strcpy(tp->board_part_number, "BCM57765");
15179                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15180                         strcpy(tp->board_part_number, "BCM57781");
15181                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15182                         strcpy(tp->board_part_number, "BCM57785");
15183                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15184                         strcpy(tp->board_part_number, "BCM57791");
15185                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15186                         strcpy(tp->board_part_number, "BCM57795");
15187                 else
15188                         goto nomatch;
15189         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15190                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15191                         strcpy(tp->board_part_number, "BCM57762");
15192                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15193                         strcpy(tp->board_part_number, "BCM57766");
15194                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15195                         strcpy(tp->board_part_number, "BCM57782");
15196                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15197                         strcpy(tp->board_part_number, "BCM57786");
15198                 else
15199                         goto nomatch;
15200         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15201                 strcpy(tp->board_part_number, "BCM95906");
15202         } else {
15203 nomatch:
15204                 strcpy(tp->board_part_number, "none");
15205         }
15206 }
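/* Part number fallback order in the function above: the VPD PARTNO
 * keyword wins, then a PCI-device-ID-based default, and finally the
 * literal "none".
 */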
15207
15208 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15209 {
15210         u32 val;
15211
15212         if (tg3_nvram_read(tp, offset, &val) ||
15213             (val & 0xfc000000) != 0x0c000000 ||
15214             tg3_nvram_read(tp, offset + 4, &val) ||
15215             val != 0)
15216                 return 0;
15217
15218         return 1;
15219 }
15220
15221 static void tg3_read_bc_ver(struct tg3 *tp)
15222 {
15223         u32 val, offset, start, ver_offset;
15224         int i, dst_off;
15225         bool newver = false;
15226
15227         if (tg3_nvram_read(tp, 0xc, &offset) ||
15228             tg3_nvram_read(tp, 0x4, &start))
15229                 return;
15230
15231         offset = tg3_nvram_logical_addr(tp, offset);
15232
15233         if (tg3_nvram_read(tp, offset, &val))
15234                 return;
15235
15236         if ((val & 0xfc000000) == 0x0c000000) {
15237                 if (tg3_nvram_read(tp, offset + 4, &val))
15238                         return;
15239
15240                 if (val == 0)
15241                         newver = true;
15242         }
15243
15244         dst_off = strlen(tp->fw_ver);
15245
15246         if (newver) {
15247                 if (TG3_VER_SIZE - dst_off < 16 ||
15248                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15249                         return;
15250
15251                 offset = offset + ver_offset - start;
15252                 for (i = 0; i < 16; i += 4) {
15253                         __be32 v;
15254                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15255                                 return;
15256
15257                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15258                 }
15259         } else {
15260                 u32 major, minor;
15261
15262                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15263                         return;
15264
15265                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15266                         TG3_NVM_BCVER_MAJSFT;
15267                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15268                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15269                          "v%d.%02d", major, minor);
15270         }
15271 }
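/* Two bootcode version formats are handled above: newer images point
 * at a 16-byte ASCII version string (copied verbatim), while legacy
 * images only carry packed major/minor fields that are rendered as
 * "v%d.%02d".
 */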
15272
15273 static void tg3_read_hwsb_ver(struct tg3 *tp)
15274 {
15275         u32 val, major, minor;
15276
15277         /* Use native endian representation */
15278         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15279                 return;
15280
15281         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15282                 TG3_NVM_HWSB_CFG1_MAJSFT;
15283         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15284                 TG3_NVM_HWSB_CFG1_MINSFT;
15285
15286         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15287 }
15288
15289 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15290 {
15291         u32 offset, major, minor, build;
15292
15293         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15294
15295         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15296                 return;
15297
15298         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15299         case TG3_EEPROM_SB_REVISION_0:
15300                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15301                 break;
15302         case TG3_EEPROM_SB_REVISION_2:
15303                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15304                 break;
15305         case TG3_EEPROM_SB_REVISION_3:
15306                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15307                 break;
15308         case TG3_EEPROM_SB_REVISION_4:
15309                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15310                 break;
15311         case TG3_EEPROM_SB_REVISION_5:
15312                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15313                 break;
15314         case TG3_EEPROM_SB_REVISION_6:
15315                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15316                 break;
15317         default:
15318                 return;
15319         }
15320
15321         if (tg3_nvram_read(tp, offset, &val))
15322                 return;
15323
15324         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15325                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15326         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15327                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15328         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15329
15330         if (minor > 99 || build > 26)
15331                 return;
15332
15333         offset = strlen(tp->fw_ver);
15334         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15335                  " v%d.%02d", major, minor);
15336
15337         if (build > 0) {
15338                 offset = strlen(tp->fw_ver);
15339                 if (offset < TG3_VER_SIZE - 1)
15340                         tp->fw_ver[offset] = 'a' + build - 1;
15341         }
15342 }
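/* Worked example of the string built above: major 1, minor 2 and
 * build 3 yield "sb v1.02" plus the build letter 'c' ('a' + 3 - 1),
 * i.e. "sb v1.02c".
 */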
15343
15344 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15345 {
15346         u32 val, offset, start;
15347         int i, vlen;
15348
15349         for (offset = TG3_NVM_DIR_START;
15350              offset < TG3_NVM_DIR_END;
15351              offset += TG3_NVM_DIRENT_SIZE) {
15352                 if (tg3_nvram_read(tp, offset, &val))
15353                         return;
15354
15355                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15356                         break;
15357         }
15358
15359         if (offset == TG3_NVM_DIR_END)
15360                 return;
15361
15362         if (!tg3_flag(tp, 5705_PLUS))
15363                 start = 0x08000000;
15364         else if (tg3_nvram_read(tp, offset - 4, &start))
15365                 return;
15366
15367         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15368             !tg3_fw_img_is_valid(tp, offset) ||
15369             tg3_nvram_read(tp, offset + 8, &val))
15370                 return;
15371
15372         offset += val - start;
15373
15374         vlen = strlen(tp->fw_ver);
15375
15376         tp->fw_ver[vlen++] = ',';
15377         tp->fw_ver[vlen++] = ' ';
15378
15379         for (i = 0; i < 4; i++) {
15380                 __be32 v;
15381                 if (tg3_nvram_read_be32(tp, offset, &v))
15382                         return;
15383
15384                 offset += sizeof(v);
15385
15386                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15387                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15388                         break;
15389                 }
15390
15391                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15392                 vlen += sizeof(v);
15393         }
15394 }
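
/* Sketch of the copy loop above (illustrative, not driver code): the
 * management firmware version string is stored as up to four
 * big-endian 32-bit words, so a word fetched with
 * tg3_nvram_read_be32() already has its bytes in string order:
 *
 *	__be32 v = cpu_to_be32(0x76312e32);	(the bytes "v1.2")
 *	memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
 *
 * Only the final, possibly partial word needs the TG3_VER_SIZE bound.
 */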
15395
15396 static void tg3_probe_ncsi(struct tg3 *tp)
15397 {
15398         u32 apedata;
15399
15400         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15401         if (apedata != APE_SEG_SIG_MAGIC)
15402                 return;
15403
15404         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15405         if (!(apedata & APE_FW_STATUS_READY))
15406                 return;
15407
15408         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15409                 tg3_flag_set(tp, APE_HAS_NCSI);
15410 }
15411
15412 static void tg3_read_dash_ver(struct tg3 *tp)
15413 {
15414         int vlen;
15415         u32 apedata;
15416         char *fwtype;
15417
15418         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15419
15420         if (tg3_flag(tp, APE_HAS_NCSI))
15421                 fwtype = "NCSI";
15422         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15423                 fwtype = "SMASH";
15424         else
15425                 fwtype = "DASH";
15426
15427         vlen = strlen(tp->fw_ver);
15428
15429         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15430                  fwtype,
15431                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15432                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15433                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15434                  (apedata & APE_FW_VERSION_BLDMSK));
15435 }
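
/* Illustrative decode, assuming the usual 8:8:8:8 layout of the
 * TG3_APE_FW_VERSION fields: apedata = 0x01020304 would be reported
 * as " NCSI v1.2.3.4" (or DASH/SMASH, per the fwtype chosen above).
 */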
15436
15437 static void tg3_read_otp_ver(struct tg3 *tp)
15438 {
15439         u32 val, val2;
15440
15441         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15442                 return;
15443
15444         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15445             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15446             TG3_OTP_MAGIC0_VALID(val)) {
15447                 u64 val64 = (u64) val << 32 | val2;
15448                 u32 ver = 0;
15449                 int i, vlen;
15450
15451                 for (i = 0; i < 7; i++) {
15452                         if ((val64 & 0xff) == 0)
15453                                 break;
15454                         ver = val64 & 0xff;
15455                         val64 >>= 8;
15456                 }
15457                 vlen = strlen(tp->fw_ver);
15458                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15459         }
15460 }
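
/* Illustrative walk of the loop above: if the low bytes of val64
 * (LSB first) are 0x05, 0x04, 0x00, ..., the loop records ver = 5,
 * then ver = 4, then stops at the zero byte; i.e. ver ends up as the
 * last non-zero byte before the terminator and " .04" is appended.
 */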
15461
15462 static void tg3_read_fw_ver(struct tg3 *tp)
15463 {
15464         u32 val;
15465         bool vpd_vers = false;
15466
15467         if (tp->fw_ver[0] != 0)
15468                 vpd_vers = true;
15469
15470         if (tg3_flag(tp, NO_NVRAM)) {
15471                 strcat(tp->fw_ver, "sb");
15472                 tg3_read_otp_ver(tp);
15473                 return;
15474         }
15475
15476         if (tg3_nvram_read(tp, 0, &val))
15477                 return;
15478
15479         if (val == TG3_EEPROM_MAGIC)
15480                 tg3_read_bc_ver(tp);
15481         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15482                 tg3_read_sb_ver(tp, val);
15483         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15484                 tg3_read_hwsb_ver(tp);
15485
15486         if (tg3_flag(tp, ENABLE_ASF)) {
15487                 if (tg3_flag(tp, ENABLE_APE)) {
15488                         tg3_probe_ncsi(tp);
15489                         if (!vpd_vers)
15490                                 tg3_read_dash_ver(tp);
15491                 } else if (!vpd_vers) {
15492                         tg3_read_mgmtfw_ver(tp);
15493                 }
15494         }
15495
15496         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15497 }
15498
15499 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15500 {
15501         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15502                 return TG3_RX_RET_MAX_SIZE_5717;
15503         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15504                 return TG3_RX_RET_MAX_SIZE_5700;
15505         else
15506                 return TG3_RX_RET_MAX_SIZE_5705;
15507 }
15508
15509 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15510         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15511         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15512         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15513         { },
15514 };
15515
15516 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15517 {
15518         struct pci_dev *peer;
15519         unsigned int func, devnr = tp->pdev->devfn & ~7;
15520
15521         for (func = 0; func < 8; func++) {
15522                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15523                 if (peer && peer != tp->pdev)
15524                         break;
15525                 pci_dev_put(peer);
15526         }
15527         /* 5704 can be configured in single-port mode; set peer to
15528          * tp->pdev in that case.
15529          */
15530         if (!peer) {
15531                 peer = tp->pdev;
15532                 return peer;
15533         }
15534
15535         /*
15536          * We don't need to keep the refcount elevated; there's no way
15537          * to remove one half of this device without removing the other.
15538          */
15539         pci_dev_put(peer);
15540
15541         return peer;
15542 }
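
/* Note on the devfn arithmetic above: a PCI devfn encodes
 * (slot << 3) | function, so (devfn & ~7) is the devfn of function 0
 * in the same slot, and (devnr | func) walks all eight possible
 * functions of that slot -- the same value PCI_DEVFN(PCI_SLOT(devfn),
 * func) would produce.
 */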
15543
15544 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15545 {
15546         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15547         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15548                 u32 reg;
15549
15550                 /* All devices that use the alternate
15551                  * ASIC REV location have a CPMU.
15552                  */
15553                 tg3_flag_set(tp, CPMU_PRESENT);
15554
15555                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15556                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15557                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15558                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15559                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15560                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15561                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15562                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15563                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15564                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15565                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15566                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15567                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15568                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15569                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15570                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15571                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15572                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15573                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15574                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15575                 else
15576                         reg = TG3PCI_PRODID_ASICREV;
15577
15578                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15579         }
15580
15581         /* Wrong chip ID in 5752 A0. This code can be removed later
15582          * as A0 is not in production.
15583          */
15584         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15585                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15586
15587         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15588                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15589
15590         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15591             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15592             tg3_asic_rev(tp) == ASIC_REV_5720)
15593                 tg3_flag_set(tp, 5717_PLUS);
15594
15595         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15596             tg3_asic_rev(tp) == ASIC_REV_57766)
15597                 tg3_flag_set(tp, 57765_CLASS);
15598
15599         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15600              tg3_asic_rev(tp) == ASIC_REV_5762)
15601                 tg3_flag_set(tp, 57765_PLUS);
15602
15603         /* Intentionally exclude ASIC_REV_5906 */
15604         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15605             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15606             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15607             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15608             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15609             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15610             tg3_flag(tp, 57765_PLUS))
15611                 tg3_flag_set(tp, 5755_PLUS);
15612
15613         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15614             tg3_asic_rev(tp) == ASIC_REV_5714)
15615                 tg3_flag_set(tp, 5780_CLASS);
15616
15617         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15618             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15619             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15620             tg3_flag(tp, 5755_PLUS) ||
15621             tg3_flag(tp, 5780_CLASS))
15622                 tg3_flag_set(tp, 5750_PLUS);
15623
15624         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15625             tg3_flag(tp, 5750_PLUS))
15626                 tg3_flag_set(tp, 5705_PLUS);
15627 }
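
/* The chip-class flags set above are cumulative, newest to oldest:
 *
 *	57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS
 *
 * so a chip that sets 5717_PLUS (and therefore 57765_PLUS) also gets
 * every older flag, while 5780_CLASS and 57765_CLASS mark specific
 * families that feed into this chain.
 */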
15628
15629 static bool tg3_10_100_only_device(struct tg3 *tp,
15630                                    const struct pci_device_id *ent)
15631 {
15632         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15633
15634         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15635              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15636             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15637                 return true;
15638
15639         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15640                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15641                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15642                                 return true;
15643                 } else {
15644                         return true;
15645                 }
15646         }
15647
15648         return false;
15649 }
15650
15651 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15652 {
15653         u32 misc_ctrl_reg;
15654         u32 pci_state_reg, grc_misc_cfg;
15655         u32 val;
15656         u16 pci_cmd;
15657         int err;
15658
15659         /* Force memory write invalidate off.  If we leave it on,
15660          * then on 5700_BX chips we have to enable a workaround.
15661          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15662          * to match the cacheline size.  The Broadcom driver has this
15663          * workaround but turns MWI off all the time, so it never uses
15664          * it.  This suggests that the workaround is insufficient.
15665          */
15666         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15667         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15668         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15669
15670         /* Important! -- Make sure register accesses are byteswapped
15671          * correctly.  Also, for those chips that require it, make
15672          * sure that indirect register accesses are enabled before
15673          * the first operation.
15674          */
15675         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15676                               &misc_ctrl_reg);
15677         tp->misc_host_ctrl |= (misc_ctrl_reg &
15678                                MISC_HOST_CTRL_CHIPREV);
15679         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15680                                tp->misc_host_ctrl);
15681
15682         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15683
15684         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15685          * we need to disable memory and use config cycles
15686          * only to access all registers. The 5702/03 chips
15687          * can mistakenly decode the special cycles from the
15688          * ICH chipsets as memory write cycles, causing corruption
15689          * of register and memory space. Only certain ICH bridges
15690          * will drive special cycles with non-zero data during the
15691          * address phase which can fall within the 5703's address
15692          * range. This is not an ICH bug as the PCI spec allows
15693          * non-zero address during special cycles. However, only
15694          * these ICH bridges are known to drive non-zero addresses
15695          * during special cycles.
15696          *
15697          * Since special cycles do not cross PCI bridges, we only
15698          * enable this workaround if the 5703 is on the secondary
15699          * bus of these ICH bridges.
15700          */
15701         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15702             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15703                 static struct tg3_dev_id {
15704                         u32     vendor;
15705                         u32     device;
15706                         u32     rev;
15707                 } ich_chipsets[] = {
15708                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15709                           PCI_ANY_ID },
15710                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15711                           PCI_ANY_ID },
15712                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15713                           0xa },
15714                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15715                           PCI_ANY_ID },
15716                         { },
15717                 };
15718                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15719                 struct pci_dev *bridge = NULL;
15720
15721                 while (pci_id->vendor != 0) {
15722                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15723                                                 bridge);
15724                         if (!bridge) {
15725                                 pci_id++;
15726                                 continue;
15727                         }
15728                         if (pci_id->rev != PCI_ANY_ID) {
15729                                 if (bridge->revision > pci_id->rev)
15730                                         continue;
15731                         }
15732                         if (bridge->subordinate &&
15733                             (bridge->subordinate->number ==
15734                              tp->pdev->bus->number)) {
15735                                 tg3_flag_set(tp, ICH_WORKAROUND);
15736                                 pci_dev_put(bridge);
15737                                 break;
15738                         }
15739                 }
15740         }
15741
15742         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15743                 static struct tg3_dev_id {
15744                         u32     vendor;
15745                         u32     device;
15746                 } bridge_chipsets[] = {
15747                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15748                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15749                         { },
15750                 };
15751                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15752                 struct pci_dev *bridge = NULL;
15753
15754                 while (pci_id->vendor != 0) {
15755                         bridge = pci_get_device(pci_id->vendor,
15756                                                 pci_id->device,
15757                                                 bridge);
15758                         if (!bridge) {
15759                                 pci_id++;
15760                                 continue;
15761                         }
15762                         if (bridge->subordinate &&
15763                             (bridge->subordinate->number <=
15764                              tp->pdev->bus->number) &&
15765                             (bridge->subordinate->busn_res.end >=
15766                              tp->pdev->bus->number)) {
15767                                 tg3_flag_set(tp, 5701_DMA_BUG);
15768                                 pci_dev_put(bridge);
15769                                 break;
15770                         }
15771                 }
15772         }
15773
15774         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15775          * DMA addresses > 40-bit. This bridge may have additional
15776          * 57xx devices behind it, for example in some 4-port NIC designs.
15777          * Any tg3 device found behind the bridge will also need the 40-bit
15778          * DMA workaround.
15779          */
15780         if (tg3_flag(tp, 5780_CLASS)) {
15781                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15782                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15783         } else {
15784                 struct pci_dev *bridge = NULL;
15785
15786                 do {
15787                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15788                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15789                                                 bridge);
15790                         if (bridge && bridge->subordinate &&
15791                             (bridge->subordinate->number <=
15792                              tp->pdev->bus->number) &&
15793                             (bridge->subordinate->busn_res.end >=
15794                              tp->pdev->bus->number)) {
15795                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15796                                 pci_dev_put(bridge);
15797                                 break;
15798                         }
15799                 } while (bridge);
15800         }
15801
15802         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15803             tg3_asic_rev(tp) == ASIC_REV_5714)
15804                 tp->pdev_peer = tg3_find_peer(tp);
15805
15806         /* Determine TSO capabilities */
15807         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15808                 ; /* Do nothing. HW bug. */
15809         else if (tg3_flag(tp, 57765_PLUS))
15810                 tg3_flag_set(tp, HW_TSO_3);
15811         else if (tg3_flag(tp, 5755_PLUS) ||
15812                  tg3_asic_rev(tp) == ASIC_REV_5906)
15813                 tg3_flag_set(tp, HW_TSO_2);
15814         else if (tg3_flag(tp, 5750_PLUS)) {
15815                 tg3_flag_set(tp, HW_TSO_1);
15816                 tg3_flag_set(tp, TSO_BUG);
15817                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15818                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15819                         tg3_flag_clear(tp, TSO_BUG);
15820         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15821                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15822                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15823                 tg3_flag_set(tp, FW_TSO);
15824                 tg3_flag_set(tp, TSO_BUG);
15825                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15826                         tp->fw_needed = FIRMWARE_TG3TSO5;
15827                 else
15828                         tp->fw_needed = FIRMWARE_TG3TSO;
15829         }
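
        /* Summary of the selection above: 57765_PLUS chips use HW_TSO_3,
         * 5755_PLUS and the 5906 use HW_TSO_2, remaining 5750_PLUS chips
         * use HW_TSO_1, and older chips (except the 5700, 5701 and the
         * 5705 A0 stepping) fall back to firmware TSO with a matching
         * firmware image; the 5719 A0 gets no TSO at all due to a
         * hardware bug.
         */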
15830
15831         /* Selectively allow TSO based on operating conditions */
15832         if (tg3_flag(tp, HW_TSO_1) ||
15833             tg3_flag(tp, HW_TSO_2) ||
15834             tg3_flag(tp, HW_TSO_3) ||
15835             tg3_flag(tp, FW_TSO)) {
15836                 /* For firmware TSO, assume ASF is disabled.
15837                  * We'll disable TSO later if we discover ASF
15838                  * is enabled in tg3_get_eeprom_hw_cfg().
15839                  */
15840                 tg3_flag_set(tp, TSO_CAPABLE);
15841         } else {
15842                 tg3_flag_clear(tp, TSO_CAPABLE);
15843                 tg3_flag_clear(tp, TSO_BUG);
15844                 tp->fw_needed = NULL;
15845         }
15846
15847         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15848                 tp->fw_needed = FIRMWARE_TG3;
15849
15850         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15851                 tp->fw_needed = FIRMWARE_TG357766;
15852
15853         tp->irq_max = 1;
15854
15855         if (tg3_flag(tp, 5750_PLUS)) {
15856                 tg3_flag_set(tp, SUPPORT_MSI);
15857                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15858                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15859                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15860                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15861                      tp->pdev_peer == tp->pdev))
15862                         tg3_flag_clear(tp, SUPPORT_MSI);
15863
15864                 if (tg3_flag(tp, 5755_PLUS) ||
15865                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15866                         tg3_flag_set(tp, 1SHOT_MSI);
15867                 }
15868
15869                 if (tg3_flag(tp, 57765_PLUS)) {
15870                         tg3_flag_set(tp, SUPPORT_MSIX);
15871                         tp->irq_max = TG3_IRQ_MAX_VECS;
15872                 }
15873         }
15874
15875         tp->txq_max = 1;
15876         tp->rxq_max = 1;
15877         if (tp->irq_max > 1) {
15878                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15879                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15880
15881                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15882                     tg3_asic_rev(tp) == ASIC_REV_5720)
15883                         tp->txq_max = tp->irq_max - 1;
15884         }
15885
15886         if (tg3_flag(tp, 5755_PLUS) ||
15887             tg3_asic_rev(tp) == ASIC_REV_5906)
15888                 tg3_flag_set(tp, SHORT_DMA_BUG);
15889
15890         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15891                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15892
15893         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15894             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15895             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15896             tg3_asic_rev(tp) == ASIC_REV_5762)
15897                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15898
15899         if (tg3_flag(tp, 57765_PLUS) &&
15900             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15901                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15902
15903         if (!tg3_flag(tp, 5705_PLUS) ||
15904             tg3_flag(tp, 5780_CLASS) ||
15905             tg3_flag(tp, USE_JUMBO_BDFLAG))
15906                 tg3_flag_set(tp, JUMBO_CAPABLE);
15907
15908         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15909                               &pci_state_reg);
15910
15911         if (pci_is_pcie(tp->pdev)) {
15912                 u16 lnkctl;
15913
15914                 tg3_flag_set(tp, PCI_EXPRESS);
15915
15916                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15917                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15918                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15919                                 tg3_flag_clear(tp, HW_TSO_2);
15920                                 tg3_flag_clear(tp, TSO_CAPABLE);
15921                         }
15922                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15923                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15924                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15925                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15926                                 tg3_flag_set(tp, CLKREQ_BUG);
15927                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15928                         tg3_flag_set(tp, L1PLLPD_EN);
15929                 }
15930         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15931                 /* BCM5785 devices are effectively PCIe devices, and should
15932                  * follow PCIe codepaths, but do not have a PCIe capabilities
15933                  * section.
15934                  */
15935                 tg3_flag_set(tp, PCI_EXPRESS);
15936         } else if (!tg3_flag(tp, 5705_PLUS) ||
15937                    tg3_flag(tp, 5780_CLASS)) {
15938                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15939                 if (!tp->pcix_cap) {
15940                         dev_err(&tp->pdev->dev,
15941                                 "Cannot find PCI-X capability, aborting\n");
15942                         return -EIO;
15943                 }
15944
15945                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15946                         tg3_flag_set(tp, PCIX_MODE);
15947         }
15948
15949         /* If we have an AMD 762 or VIA K8T800 chipset, write
15950          * reordering to the mailbox registers done by the host
15951          * controller can cause major problems.  We read back after
15952          * every mailbox register write to force the writes to be
15953          * posted to the chip in order.
15954          */
15955         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15956             !tg3_flag(tp, PCI_EXPRESS))
15957                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
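
        /* With MBOX_WRITE_REORDER set, mailbox writes get flushed by an
         * immediate read-back, conceptually:
         *
         *	writel(val, mbox);
         *	readl(mbox);		(force the posted write to complete)
         *
         * which is what the tg3_write_flush_reg32()-style accessors
         * installed further down do for the affected mailbox paths.
         */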
15958
15959         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15960                              &tp->pci_cacheline_sz);
15961         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15962                              &tp->pci_lat_timer);
15963         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15964             tp->pci_lat_timer < 64) {
15965                 tp->pci_lat_timer = 64;
15966                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15967                                       tp->pci_lat_timer);
15968         }
15969
15970         /* Important! -- It is critical that the PCI-X hw workaround
15971          * decision is made before the first MMIO register access.
15972          */
15973         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15974                 /* 5700 BX chips need to have their TX producer index
15975                  * mailboxes written twice to workaround a bug.
15976                  */
15977                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15978
15979                 /* If we are in PCI-X mode, enable register write workaround.
15980                  *
15981                  * The workaround is to use indirect register accesses
15982                  * for all chip writes not to mailbox registers.
15983                  */
15984                 if (tg3_flag(tp, PCIX_MODE)) {
15985                         u32 pm_reg;
15986
15987                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15988
15989                         /* The chip can have its power management PCI config
15990                          * space registers clobbered due to this bug.
15991                          * So explicitly force the chip into D0 here.
15992                          */
15993                         pci_read_config_dword(tp->pdev,
15994                                               tp->pm_cap + PCI_PM_CTRL,
15995                                               &pm_reg);
15996                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15997                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15998                         pci_write_config_dword(tp->pdev,
15999                                                tp->pm_cap + PCI_PM_CTRL,
16000                                                pm_reg);
16001
16002                         /* Also, force SERR#/PERR# in PCI command. */
16003                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16004                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16005                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16006                 }
16007         }
16008
16009         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16010                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16011         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16012                 tg3_flag_set(tp, PCI_32BIT);
16013
16014         /* Chip-specific fixup from Broadcom driver */
16015         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16016             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16017                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16018                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16019         }
16020
16021         /* Default fast path register access methods */
16022         tp->read32 = tg3_read32;
16023         tp->write32 = tg3_write32;
16024         tp->read32_mbox = tg3_read32;
16025         tp->write32_mbox = tg3_write32;
16026         tp->write32_tx_mbox = tg3_write32;
16027         tp->write32_rx_mbox = tg3_write32;
16028
16029         /* Various workaround register access methods */
16030         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16031                 tp->write32 = tg3_write_indirect_reg32;
16032         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16033                  (tg3_flag(tp, PCI_EXPRESS) &&
16034                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16035                 /*
16036                  * Back-to-back register writes can cause problems on these
16037                  * chips; the workaround is to read back all reg writes
16038                  * except those to mailbox regs.
16039                  *
16040                  * See tg3_write_indirect_reg32().
16041                  */
16042                 tp->write32 = tg3_write_flush_reg32;
16043         }
16044
16045         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16046                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16047                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16048                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16049         }
16050
16051         if (tg3_flag(tp, ICH_WORKAROUND)) {
16052                 tp->read32 = tg3_read_indirect_reg32;
16053                 tp->write32 = tg3_write_indirect_reg32;
16054                 tp->read32_mbox = tg3_read_indirect_mbox;
16055                 tp->write32_mbox = tg3_write_indirect_mbox;
16056                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16057                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16058
16059                 iounmap(tp->regs);
16060                 tp->regs = NULL;
16061
16062                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16063                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16064                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16065         }
16066         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16067                 tp->read32_mbox = tg3_read32_mbox_5906;
16068                 tp->write32_mbox = tg3_write32_mbox_5906;
16069                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16070                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16071         }
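
        /* At this point the tp->read32/write32 pointers form a small
         * dispatch table: the defaults are plain MMIO, and each errata
         * path above swaps in an indirect or flushing variant only for
         * the accesses (registers vs. mailboxes, rx vs. tx) it actually
         * affects.
         */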
16072
16073         if (tp->write32 == tg3_write_indirect_reg32 ||
16074             (tg3_flag(tp, PCIX_MODE) &&
16075              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16076               tg3_asic_rev(tp) == ASIC_REV_5701)))
16077                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16078
16079         /* The memory arbiter has to be enabled in order for SRAM accesses
16080          * to succeed.  Normally on powerup the tg3 chip firmware will make
16081          * sure it is enabled, but other entities such as system netboot
16082          * code might disable it.
16083          */
16084         val = tr32(MEMARB_MODE);
16085         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16086
16087         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16088         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16089             tg3_flag(tp, 5780_CLASS)) {
16090                 if (tg3_flag(tp, PCIX_MODE)) {
16091                         pci_read_config_dword(tp->pdev,
16092                                               tp->pcix_cap + PCI_X_STATUS,
16093                                               &val);
16094                         tp->pci_fn = val & 0x7;
16095                 }
16096         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16097                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16098                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16099                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16100                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16101                         val = tr32(TG3_CPMU_STATUS);
16102
16103                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16104                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16105                 else
16106                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16107                                      TG3_CPMU_STATUS_FSHFT_5719;
16108         }
16109
16110         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16111                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16112                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16113         }
16114
16115         /* Get eeprom hw config before calling tg3_set_power_state().
16116          * In particular, the TG3_FLAG_IS_NIC flag must be
16117          * determined before calling tg3_set_power_state() so that
16118          * we know whether or not to switch out of Vaux power.
16119          * When the flag is set, it means that GPIO1 is used for eeprom
16120          * write protect and also implies that it is a LOM where GPIOs
16121          * are not used to switch power.
16122          */
16123         tg3_get_eeprom_hw_cfg(tp);
16124
16125         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16126                 tg3_flag_clear(tp, TSO_CAPABLE);
16127                 tg3_flag_clear(tp, TSO_BUG);
16128                 tp->fw_needed = NULL;
16129         }
16130
16131         if (tg3_flag(tp, ENABLE_APE)) {
16132                 /* Allow reads and writes to the
16133                  * APE register and memory space.
16134                  */
16135                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16136                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16137                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16138                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16139                                        pci_state_reg);
16140
16141                 tg3_ape_lock_init(tp);
16142         }
16143
16144         /* Set up tp->grc_local_ctrl before calling
16145          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16146          * will bring 5700's external PHY out of reset.
16147          * It is also used as eeprom write protect on LOMs.
16148          */
16149         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16150         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16151             tg3_flag(tp, EEPROM_WRITE_PROT))
16152                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16153                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16154         /* Unused GPIO3 must be driven as output on 5752 because there
16155          * are no pull-up resistors on unused GPIO pins.
16156          */
16157         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16158                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16159
16160         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16161             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16162             tg3_flag(tp, 57765_CLASS))
16163                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16164
16165         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16166             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16167                 /* Turn off the debug UART. */
16168                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16169                 if (tg3_flag(tp, IS_NIC))
16170                         /* Keep VMain power. */
16171                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16172                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16173         }
16174
16175         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16176                 tp->grc_local_ctrl |=
16177                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16178
16179         /* Switch out of Vaux if it is a NIC */
16180         tg3_pwrsrc_switch_to_vmain(tp);
16181
16182         /* Derive initial jumbo mode from MTU assigned in
16183          * ether_setup() via the alloc_etherdev() call
16184          */
16185         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16186                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16187
16188         /* Determine WakeOnLan speed to use. */
16189         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16190             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16191             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16192             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16193                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16194         } else {
16195                 tg3_flag_set(tp, WOL_SPEED_100MB);
16196         }
16197
16198         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16199                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16200
16201         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16202         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16203             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16204              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16205              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16206             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16207             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16208                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16209
16210         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16211             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16212                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16213         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16214                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16215
16216         if (tg3_flag(tp, 5705_PLUS) &&
16217             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16218             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16219             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16220             !tg3_flag(tp, 57765_PLUS)) {
16221                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16222                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16223                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16224                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16225                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16226                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16227                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16228                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16229                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16230                 } else
16231                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16232         }
16233
16234         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16235             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16236                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16237                 if (tp->phy_otp == 0)
16238                         tp->phy_otp = TG3_OTP_DEFAULT;
16239         }
16240
16241         if (tg3_flag(tp, CPMU_PRESENT))
16242                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16243         else
16244                 tp->mi_mode = MAC_MI_MODE_BASE;
16245
16246         tp->coalesce_mode = 0;
16247         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16248             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16249                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16250
16251         /* Set these bits to enable statistics workaround. */
16252         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16253             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16254             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16255                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16256                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16257         }
16258
16259         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16260             tg3_asic_rev(tp) == ASIC_REV_57780)
16261                 tg3_flag_set(tp, USE_PHYLIB);
16262
16263         err = tg3_mdio_init(tp);
16264         if (err)
16265                 return err;
16266
16267         /* Initialize data/descriptor byte/word swapping. */
16268         val = tr32(GRC_MODE);
16269         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16270             tg3_asic_rev(tp) == ASIC_REV_5762)
16271                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16272                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16273                         GRC_MODE_B2HRX_ENABLE |
16274                         GRC_MODE_HTX2B_ENABLE |
16275                         GRC_MODE_HOST_STACKUP);
16276         else
16277                 val &= GRC_MODE_HOST_STACKUP;
16278
16279         tw32(GRC_MODE, val | tp->grc_mode);
16280
16281         tg3_switch_clocks(tp);
16282
16283         /* Clear this out for sanity. */
16284         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16285
16286         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16287                               &pci_state_reg);
16288         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16289             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16290                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16291                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16292                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16293                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16294                         void __iomem *sram_base;
16295
16296                         /* Write some dummy words into the SRAM status block
16297                          * area and see if they read back correctly.  If the
16298                          * read-back value is bad, force-enable the PCIX workaround.
16299                          */
16300                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16301
16302                         writel(0x00000000, sram_base);
16303                         writel(0x00000000, sram_base + 4);
16304                         writel(0xffffffff, sram_base + 4);
16305                         if (readl(sram_base) != 0x00000000)
16306                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16307                 }
16308         }
16309
16310         udelay(50);
16311         tg3_nvram_init(tp);
16312
16313         /* If the device has NVRAM, there is no need to load patch firmware */
16314         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16315             !tg3_flag(tp, NO_NVRAM))
16316                 tp->fw_needed = NULL;
16317
16318         grc_misc_cfg = tr32(GRC_MISC_CFG);
16319         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16320
16321         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16322             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16323              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16324                 tg3_flag_set(tp, IS_5788);
16325
16326         if (!tg3_flag(tp, IS_5788) &&
16327             tg3_asic_rev(tp) != ASIC_REV_5700)
16328                 tg3_flag_set(tp, TAGGED_STATUS);
16329         if (tg3_flag(tp, TAGGED_STATUS)) {
16330                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16331                                       HOSTCC_MODE_CLRTICK_TXBD);
16332
16333                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16334                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16335                                        tp->misc_host_ctrl);
16336         }
16337
16338         /* Preserve the APE MAC_MODE bits */
16339         if (tg3_flag(tp, ENABLE_APE))
16340                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16341         else
16342                 tp->mac_mode = 0;
16343
16344         if (tg3_10_100_only_device(tp, ent))
16345                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16346
16347         err = tg3_phy_probe(tp);
16348         if (err) {
16349                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16350                 /* ... but do not return immediately ... */
16351                 tg3_mdio_fini(tp);
16352         }
16353
16354         tg3_read_vpd(tp);
16355         tg3_read_fw_ver(tp);
16356
16357         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16358                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16359         } else {
16360                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16361                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16362                 else
16363                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16364         }
16365
16366         /* 5700 {AX,BX} chips have a broken status block link
16367          * change bit implementation, so we must use the
16368          * status register in those cases.
16369          */
16370         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16371                 tg3_flag_set(tp, USE_LINKCHG_REG);
16372         else
16373                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16374
16375         /* The led_ctrl is set during tg3_phy_probe; here we might
16376          * have to force the link status polling mechanism based
16377          * upon subsystem IDs.
16378          */
16379         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16380             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16381             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16382                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16383                 tg3_flag_set(tp, USE_LINKCHG_REG);
16384         }
16385
16386         /* For all SERDES we poll the MAC status register. */
16387         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16388                 tg3_flag_set(tp, POLL_SERDES);
16389         else
16390                 tg3_flag_clear(tp, POLL_SERDES);
16391
16392         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16393         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16394         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16395             tg3_flag(tp, PCIX_MODE)) {
16396                 tp->rx_offset = NET_SKB_PAD;
16397 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16398                 tp->rx_copy_thresh = ~(u16)0;
16399 #endif
16400         }
16401
16402         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16403         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16404         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16405
16406         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16407
16408         /* Increment the rx prod index on the rx std ring by at most
16409          * 8 for these chips to workaround hw errata.
16410          * 8 for these chips to work around hw errata.
16411         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16412             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16413             tg3_asic_rev(tp) == ASIC_REV_5755)
16414                 tp->rx_std_max_post = 8;
16415
16416         if (tg3_flag(tp, ASPM_WORKAROUND))
16417                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16418                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16419
16420         return err;
16421 }
16422
16423 #ifdef CONFIG_SPARC
16424 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16425 {
16426         struct net_device *dev = tp->dev;
16427         struct pci_dev *pdev = tp->pdev;
16428         struct device_node *dp = pci_device_to_OF_node(pdev);
16429         const unsigned char *addr;
16430         int len;
16431
16432         addr = of_get_property(dp, "local-mac-address", &len);
16433         if (addr && len == 6) {
16434                 memcpy(dev->dev_addr, addr, 6);
16435                 return 0;
16436         }
16437         return -ENODEV;
16438 }
16439
16440 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16441 {
16442         struct net_device *dev = tp->dev;
16443
16444         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16445         return 0;
16446 }
16447 #endif
16448
16449 static int tg3_get_device_address(struct tg3 *tp)
16450 {
16451         struct net_device *dev = tp->dev;
16452         u32 hi, lo, mac_offset;
16453         int addr_ok = 0;
16454         int err;
16455
16456 #ifdef CONFIG_SPARC
16457         if (!tg3_get_macaddr_sparc(tp))
16458                 return 0;
16459 #endif
16460
16461         if (tg3_flag(tp, IS_SSB_CORE)) {
16462                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16463                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16464                         return 0;
16465         }
16466
16467         mac_offset = 0x7c;
16468         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16469             tg3_flag(tp, 5780_CLASS)) {
16470                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16471                         mac_offset = 0xcc;
16472                 if (tg3_nvram_lock(tp))
16473                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16474                 else
16475                         tg3_nvram_unlock(tp);
16476         } else if (tg3_flag(tp, 5717_PLUS)) {
16477                 if (tp->pci_fn & 1)
16478                         mac_offset = 0xcc;
16479                 if (tp->pci_fn > 1)
16480                         mac_offset += 0x18c;
16481         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16482                 mac_offset = 0x10;
16483
16484         /* First try to get it from the MAC address mailbox. */
16485         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16486         if ((hi >> 16) == 0x484b) {
16487                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16488                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16489
16490                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16491                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16492                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16493                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16494                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16495
16496                 /* Some old bootcode may report a 0 MAC address in SRAM */
16497                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16498         }
16499         if (!addr_ok) {
16500                 /* Next, try NVRAM. */
16501                 if (!tg3_flag(tp, NO_NVRAM) &&
16502                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16503                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16504                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16505                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16506                 }
16507                 /* Finally just fetch it out of the MAC control regs. */
16508                 else {
16509                         hi = tr32(MAC_ADDR_0_HIGH);
16510                         lo = tr32(MAC_ADDR_0_LOW);
16511
16512                         dev->dev_addr[5] = lo & 0xff;
16513                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16514                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16515                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16516                         dev->dev_addr[1] = hi & 0xff;
16517                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16518                 }
16519         }
16520
16521         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16522 #ifdef CONFIG_SPARC
16523                 if (!tg3_get_default_macaddr_sparc(tp))
16524                         return 0;
16525 #endif
16526                 return -EINVAL;
16527         }
16528         return 0;
16529 }
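
/* Layout sketch for the SRAM mailbox path above (illustrative): the
 * high word carries the 0x484b ("HK") signature in its top 16 bits,
 * so for a MAC address of 00:10:18:aa:bb:cc the mailbox would hold
 * hi = 0x484b0010 and lo = 0x18aabbcc.
 */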
16530
16531 #define BOUNDARY_SINGLE_CACHELINE       1
16532 #define BOUNDARY_MULTI_CACHELINE        2
16533
16534 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16535 {
16536         int cacheline_size;
16537         u8 byte;
16538         int goal;
16539
16540         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16541         if (byte == 0)
16542                 cacheline_size = 1024;
16543         else
16544                 cacheline_size = (int) byte * 4;
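
        /* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit dwords,
         * hence the multiply by 4 to get bytes; a read-back of 0 usually
         * means the cache line size was never configured, and the code
         * conservatively assumes the 1024-byte worst case.
         */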
16545
16546         /* On 5703 and later chips, the boundary bits have no
16547          * effect.
16548          */
16549         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16550             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16551             !tg3_flag(tp, PCI_EXPRESS))
16552                 goto out;
16553
16554 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16555         goal = BOUNDARY_MULTI_CACHELINE;
16556 #else
16557 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16558         goal = BOUNDARY_SINGLE_CACHELINE;
16559 #else
16560         goal = 0;
16561 #endif
16562 #endif
16563
16564         if (tg3_flag(tp, 57765_PLUS)) {
16565                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16566                 goto out;
16567         }
16568
16569         if (!goal)
16570                 goto out;
16571
16572         /* PCI controllers on most RISC systems tend to disconnect
16573          * when a device tries to burst across a cache-line boundary.
16574          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16575          *
16576          * Unfortunately, for PCI-E there are only limited
16577          * write-side controls for this, and thus for reads
16578          * we will still get the disconnects.  We'll also waste
16579          * these PCI cycles for both read and write on chips
16580          * other than 5700 and 5701, which are the only ones that
16581          * implement the boundary bits.
16582          */
16583         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16584                 switch (cacheline_size) {
16585                 case 16:
16586                 case 32:
16587                 case 64:
16588                 case 128:
16589                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16590                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16591                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16592                         } else {
16593                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16594                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16595                         }
16596                         break;
16597
16598                 case 256:
16599                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16600                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16601                         break;
16602
16603                 default:
16604                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16605                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16606                         break;
16607                 }
16608         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16609                 switch (cacheline_size) {
16610                 case 16:
16611                 case 32:
16612                 case 64:
16613                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16614                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16615                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16616                                 break;
16617                         }
16618                         /* fallthrough */
16619                 case 128:
16620                 default:
16621                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16622                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16623                         break;
16624                 }
16625         } else {
16626                 switch (cacheline_size) {
16627                 case 16:
16628                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16629                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16630                                         DMA_RWCTRL_WRITE_BNDRY_16);
16631                                 break;
16632                         }
16633                         /* fallthrough */
16634                 case 32:
16635                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16636                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16637                                         DMA_RWCTRL_WRITE_BNDRY_32);
16638                                 break;
16639                         }
16640                         /* fallthrough */
16641                 case 64:
16642                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16643                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16644                                         DMA_RWCTRL_WRITE_BNDRY_64);
16645                                 break;
16646                         }
16647                         /* fallthrough */
16648                 case 128:
16649                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16650                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16651                                         DMA_RWCTRL_WRITE_BNDRY_128);
16652                                 break;
16653                         }
16654                         /* fallthrough */
16655                 case 256:
16656                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16657                                 DMA_RWCTRL_WRITE_BNDRY_256);
16658                         break;
16659                 case 512:
16660                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16661                                 DMA_RWCTRL_WRITE_BNDRY_512);
16662                         break;
16663                 case 1024:
16664                 default:
16665                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16666                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16667                         break;
16668                 }
16669         }
16670
16671 out:
16672         return val;
16673 }
16674
16675 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16676                            int size, bool to_device)
16677 {
16678         struct tg3_internal_buffer_desc test_desc;
16679         u32 sram_dma_descs;
16680         int i, ret;
16681
16682         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16683
16684         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16685         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16686         tw32(RDMAC_STATUS, 0);
16687         tw32(WDMAC_STATUS, 0);
16688
16689         tw32(BUFMGR_MODE, 0);
16690         tw32(FTQ_RESET, 0);
16691
16692         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16693         test_desc.addr_lo = buf_dma & 0xffffffff;
16694         test_desc.nic_mbuf = 0x00002100;
16695         test_desc.len = size;
16696
16697         /*
16698          * HP ZX1 systems were seeing test failures on 5701 cards running
16699          * at 33MHz the *second* time the tg3 driver was loaded after an
16700          * initial scan.
16701          *
16702          * Broadcom tells me:
16703          *   ...the DMA engine is connected to the GRC block and a DMA
16704          *   reset may affect the GRC block in some unpredictable way...
16705          *   The behavior of resets to individual blocks has not been tested.
16706          *
16707          * Broadcom noted the GRC reset will also reset all sub-components.
16708          */
16709         if (to_device) {
16710                 test_desc.cqid_sqid = (13 << 8) | 2;
16711
16712                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16713                 udelay(40);
16714         } else {
16715                 test_desc.cqid_sqid = (16 << 8) | 7;
16716
16717                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16718                 udelay(40);
16719         }
16720         test_desc.flags = 0x00000005;
16721
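              /* Stage the descriptor into NIC SRAM a word at a time through
               * the PCI memory window, then close the window again.
               */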
16722         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16723                 u32 val;
16724
16725                 val = *(((u32 *)&test_desc) + i);
16726                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16727                                        sram_dma_descs + (i * sizeof(u32)));
16728                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16729         }
16730         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16731
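              /* Kick the DMA engine by enqueueing the descriptor's SRAM
               * address on the appropriate flow-through queue (FTQ).
               */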
16732         if (to_device)
16733                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16734         else
16735                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16736
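              /* Poll the completion FIFO for up to 40 * 100us (~4ms); its low
               * 16 bits echo the descriptor address once the DMA completes.
               */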
16737         ret = -ENODEV;
16738         for (i = 0; i < 40; i++) {
16739                 u32 val;
16740
16741                 if (to_device)
16742                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16743                 else
16744                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16745                 if ((val & 0xffff) == sram_dma_descs) {
16746                         ret = 0;
16747                         break;
16748                 }
16749
16750                 udelay(100);
16751         }
16752
16753         return ret;
16754 }
16755
16756 #define TEST_BUFFER_SIZE        0x2000
16757
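      /* Host bridges known to expose the 5700/5701 DMA bug even when the
       * test below passes; the 16-byte write boundary workaround is forced
       * when one of these is present.
       */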
16758 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16759         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16760         { },
16761 };
16762
16763 static int tg3_test_dma(struct tg3 *tp)
16764 {
16765         dma_addr_t buf_dma;
16766         u32 *buf, saved_dma_rwctrl;
16767         int ret = 0;
16768
16769         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16770                                  &buf_dma, GFP_KERNEL);
16771         if (!buf) {
16772                 ret = -ENOMEM;
16773                 goto out_nofree;
16774         }
16775
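              /* Seed DMA_RWCTRL with the PCI bus command codes used for DMA:
               * 0x7 (Memory Write) on the write side, 0x6 (Memory Read) on
               * the read side.
               */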
16776         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16777                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16778
16779         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16780
16781         if (tg3_flag(tp, 57765_PLUS))
16782                 goto out;
16783
16784         if (tg3_flag(tp, PCI_EXPRESS)) {
16785                 /* DMA read watermark not used on PCIE */
16786                 tp->dma_rwctrl |= 0x00180000;
16787         } else if (!tg3_flag(tp, PCIX_MODE)) {
16788                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16789                     tg3_asic_rev(tp) == ASIC_REV_5750)
16790                         tp->dma_rwctrl |= 0x003f0000;
16791                 else
16792                         tp->dma_rwctrl |= 0x003f000f;
16793         } else {
16794                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16795                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16796                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16797                         u32 read_water = 0x7;
16798
16799                         /* If the 5704 is behind the EPB bridge, we can
16800                          * do the less restrictive ONE_DMA workaround for
16801                          * better performance.
16802                          */
16803                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16804                             tg3_asic_rev(tp) == ASIC_REV_5704)
16805                                 tp->dma_rwctrl |= 0x8000;
16806                         else if (ccval == 0x6 || ccval == 0x7)
16807                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16808
16809                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16810                                 read_water = 4;
16811                         /* Set bit 23 to enable PCIX hw bug fix */
16812                         tp->dma_rwctrl |=
16813                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16814                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16815                                 (1 << 23);
16816                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16817                         /* 5780 always in PCIX mode */
16818                         tp->dma_rwctrl |= 0x00144000;
16819                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16820                         /* 5714 always in PCIX mode */
16821                         tp->dma_rwctrl |= 0x00148000;
16822                 } else {
16823                         tp->dma_rwctrl |= 0x001b000f;
16824                 }
16825         }
16826         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16827                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16828
16829         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16830             tg3_asic_rev(tp) == ASIC_REV_5704)
16831                 tp->dma_rwctrl &= 0xfffffff0;
16832
16833         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16834             tg3_asic_rev(tp) == ASIC_REV_5701) {
16835                 /* Remove this if it causes problems for some boards. */
16836                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16837
16838                 /* On 5700/5701 chips, we need to set this bit.
16839                  * Otherwise the chip will issue cacheline transactions
16840                  * to streamable DMA memory without all of the byte
16841                  * enables asserted.  This is an error on several
16842                  * RISC PCI controllers, in particular sparc64.
16843                  *
16844                  * On 5703/5704 chips, this bit has been reassigned
16845                  * a different meaning.  In particular, it is used
16846                  * on those chips to enable a PCI-X workaround.
16847                  */
16848                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16849         }
16850
16851         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16852
16853 #if 0
16854         /* Unneeded, already done by tg3_get_invariants.  */
16855         tg3_switch_clocks(tp);
16856 #endif
16857
16858         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16859             tg3_asic_rev(tp) != ASIC_REV_5701)
16860                 goto out;
16861
16862         /* It is best to perform DMA test with maximum write burst size
16863          * to expose the 5700/5701 write DMA bug.
16864          */
16865         saved_dma_rwctrl = tp->dma_rwctrl;
16866         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16867         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16868
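              /* Fill the buffer with a known pattern, DMA it to the chip and
               * back, then verify.  On a mismatch, retry once with the write
               * boundary clamped to 16 bytes; a second failure is fatal.
               */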
16869         while (1) {
16870                 u32 *p = buf, i;
16871
16872                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16873                         p[i] = i;
16874
16875                 /* Send the buffer to the chip. */
16876                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16877                 if (ret) {
16878                         dev_err(&tp->pdev->dev,
16879                                 "%s: Buffer write failed. err = %d\n",
16880                                 __func__, ret);
16881                         break;
16882                 }
16883
16884 #if 0
16885                 /* validate data reached card RAM correctly. */
16886                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16887                         u32 val;
16888                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16889                         if (le32_to_cpu(val) != p[i]) {
16890                                 dev_err(&tp->pdev->dev,
16891                                         "%s: Buffer corrupted on device! "
16892                                         "(%u != %u)\n", __func__, le32_to_cpu(val), i);
16893                                 /* ret = -ENODEV here? */
16894                         }
16895                         p[i] = 0;
16896                 }
16897 #endif
16898                 /* Now read it back. */
16899                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16900                 if (ret) {
16901                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16902                                 "err = %d\n", __func__, ret);
16903                         break;
16904                 }
16905
16906                 /* Verify it. */
16907                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16908                         if (p[i] == i)
16909                                 continue;
16910
16911                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16912                             DMA_RWCTRL_WRITE_BNDRY_16) {
16913                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16914                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16915                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16916                                 break;
16917                         } else {
16918                                 dev_err(&tp->pdev->dev,
16919                                         "%s: Buffer corrupted on read back! "
16920                                         "(%u != %u)\n", __func__, p[i], i);
16921                                 ret = -ENODEV;
16922                                 goto out;
16923                         }
16924                 }
16925
16926                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16927                         /* Success. */
16928                         ret = 0;
16929                         break;
16930                 }
16931         }
16932         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16933             DMA_RWCTRL_WRITE_BNDRY_16) {
16934                 /* DMA test passed without adjusting DMA boundary,
16935                  * now look for chipsets that are known to expose the
16936                  * DMA bug without failing the test.
16937                  */
16938                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16939                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16940                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16941                 } else {
16942                         /* Safe to use the calculated DMA boundary. */
16943                         tp->dma_rwctrl = saved_dma_rwctrl;
16944                 }
16945
16946                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16947         }
16948
16949 out:
16950         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16951 out_nofree:
16952         return ret;
16953 }
16954
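      /* Program per-chip buffer manager watermarks.  Roughly speaking, the
       * low-water marks decide when the chip starts asserting flow control
       * and the high-water marks cap mbuf usage; the exact semantics are
       * chip-specific.
       */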
16955 static void tg3_init_bufmgr_config(struct tg3 *tp)
16956 {
16957         if (tg3_flag(tp, 57765_PLUS)) {
16958                 tp->bufmgr_config.mbuf_read_dma_low_water =
16959                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16960                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16961                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16962                 tp->bufmgr_config.mbuf_high_water =
16963                         DEFAULT_MB_HIGH_WATER_57765;
16964
16965                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16966                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16967                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16968                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16969                 tp->bufmgr_config.mbuf_high_water_jumbo =
16970                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16971         } else if (tg3_flag(tp, 5705_PLUS)) {
16972                 tp->bufmgr_config.mbuf_read_dma_low_water =
16973                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16974                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16975                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16976                 tp->bufmgr_config.mbuf_high_water =
16977                         DEFAULT_MB_HIGH_WATER_5705;
16978                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16979                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16980                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16981                         tp->bufmgr_config.mbuf_high_water =
16982                                 DEFAULT_MB_HIGH_WATER_5906;
16983                 }
16984
16985                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16986                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16987                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16988                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16989                 tp->bufmgr_config.mbuf_high_water_jumbo =
16990                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16991         } else {
16992                 tp->bufmgr_config.mbuf_read_dma_low_water =
16993                         DEFAULT_MB_RDMA_LOW_WATER;
16994                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16995                         DEFAULT_MB_MACRX_LOW_WATER;
16996                 tp->bufmgr_config.mbuf_high_water =
16997                         DEFAULT_MB_HIGH_WATER;
16998
16999                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17000                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17001                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17002                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17003                 tp->bufmgr_config.mbuf_high_water_jumbo =
17004                         DEFAULT_MB_HIGH_WATER_JUMBO;
17005         }
17006
17007         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17008         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17009 }
17010
17011 static char *tg3_phy_string(struct tg3 *tp)
17012 {
17013         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17014         case TG3_PHY_ID_BCM5400:        return "5400";
17015         case TG3_PHY_ID_BCM5401:        return "5401";
17016         case TG3_PHY_ID_BCM5411:        return "5411";
17017         case TG3_PHY_ID_BCM5701:        return "5701";
17018         case TG3_PHY_ID_BCM5703:        return "5703";
17019         case TG3_PHY_ID_BCM5704:        return "5704";
17020         case TG3_PHY_ID_BCM5705:        return "5705";
17021         case TG3_PHY_ID_BCM5750:        return "5750";
17022         case TG3_PHY_ID_BCM5752:        return "5752";
17023         case TG3_PHY_ID_BCM5714:        return "5714";
17024         case TG3_PHY_ID_BCM5780:        return "5780";
17025         case TG3_PHY_ID_BCM5755:        return "5755";
17026         case TG3_PHY_ID_BCM5787:        return "5787";
17027         case TG3_PHY_ID_BCM5784:        return "5784";
17028         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17029         case TG3_PHY_ID_BCM5906:        return "5906";
17030         case TG3_PHY_ID_BCM5761:        return "5761";
17031         case TG3_PHY_ID_BCM5718C:       return "5718C";
17032         case TG3_PHY_ID_BCM5718S:       return "5718S";
17033         case TG3_PHY_ID_BCM57765:       return "57765";
17034         case TG3_PHY_ID_BCM5719C:       return "5719C";
17035         case TG3_PHY_ID_BCM5720C:       return "5720C";
17036         case TG3_PHY_ID_BCM5762:        return "5762C";
17037         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17038         case 0:                 return "serdes";
17039         default:                return "unknown";
17040         }
17041 }
17042
17043 static char *tg3_bus_string(struct tg3 *tp, char *str)
17044 {
17045         if (tg3_flag(tp, PCI_EXPRESS)) {
17046                 strcpy(str, "PCI Express");
17047                 return str;
17048         } else if (tg3_flag(tp, PCIX_MODE)) {
17049                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17050
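                      /* The low CLOCK_CTRL bits encode the PCI-X bus speed;
                       * 5704 CIOBE boards always report 133MHz.
                       */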
17051                 strcpy(str, "PCIX:");
17052
17053                 if ((clock_ctrl == 7) ||
17054                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17055                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17056                         strcat(str, "133MHz");
17057                 else if (clock_ctrl == 0)
17058                         strcat(str, "33MHz");
17059                 else if (clock_ctrl == 2)
17060                         strcat(str, "50MHz");
17061                 else if (clock_ctrl == 4)
17062                         strcat(str, "66MHz");
17063                 else if (clock_ctrl == 6)
17064                         strcat(str, "100MHz");
17065         } else {
17066                 strcpy(str, "PCI:");
17067                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17068                         strcat(str, "66MHz");
17069                 else
17070                         strcat(str, "33MHz");
17071         }
17072         if (tg3_flag(tp, PCI_32BIT))
17073                 strcat(str, ":32-bit");
17074         else
17075                 strcat(str, ":64-bit");
17076         return str;
17077 }
17078
17079 static void tg3_init_coal(struct tg3 *tp)
17080 {
17081         struct ethtool_coalesce *ec = &tp->coal;
17082
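              /* Default coalescing parameters, as later reported by
               * "ethtool -c".  CLRTICK-capable chips need slightly different
               * tick values, and on 5705+ parts the per-IRQ and statistics
               * coalescing values are zeroed below.
               */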
17083         memset(ec, 0, sizeof(*ec));
17084         ec->cmd = ETHTOOL_GCOALESCE;
17085         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17086         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17087         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17088         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17089         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17090         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17091         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17092         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17093         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17094
17095         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17096                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17097                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17098                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17099                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17100                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17101         }
17102
17103         if (tg3_flag(tp, 5705_PLUS)) {
17104                 ec->rx_coalesce_usecs_irq = 0;
17105                 ec->tx_coalesce_usecs_irq = 0;
17106                 ec->stats_block_coalesce_usecs = 0;
17107         }
17108 }
17109
17110 static int tg3_init_one(struct pci_dev *pdev,
17111                         const struct pci_device_id *ent)
17112 {
17113         struct net_device *dev;
17114         struct tg3 *tp;
17115         int i, err, pm_cap;
17116         u32 sndmbx, rcvmbx, intmbx;
17117         char str[40];
17118         u64 dma_mask, persist_dma_mask;
17119         netdev_features_t features = 0;
17120
17121         printk_once(KERN_INFO "%s\n", version);
17122
17123         err = pci_enable_device(pdev);
17124         if (err) {
17125                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17126                 return err;
17127         }
17128
17129         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17130         if (err) {
17131                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17132                 goto err_out_disable_pdev;
17133         }
17134
17135         pci_set_master(pdev);
17136
17137         /* Find power-management capability. */
17138         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17139         if (pm_cap == 0) {
17140                 dev_err(&pdev->dev,
17141                         "Cannot find Power Management capability, aborting\n");
17142                 err = -EIO;
17143                 goto err_out_free_res;
17144         }
17145
17146         err = pci_set_power_state(pdev, PCI_D0);
17147         if (err) {
17148                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17149                 goto err_out_free_res;
17150         }
17151
17152         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17153         if (!dev) {
17154                 err = -ENOMEM;
17155                 goto err_out_power_down;
17156         }
17157
17158         SET_NETDEV_DEV(dev, &pdev->dev);
17159
17160         tp = netdev_priv(dev);
17161         tp->pdev = pdev;
17162         tp->dev = dev;
17163         tp->pm_cap = pm_cap;
17164         tp->rx_mode = TG3_DEF_RX_MODE;
17165         tp->tx_mode = TG3_DEF_TX_MODE;
17166         tp->irq_sync = 1;
17167
17168         if (tg3_debug > 0)
17169                 tp->msg_enable = tg3_debug;
17170         else
17171                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17172
17173         if (pdev_is_ssb_gige_core(pdev)) {
17174                 tg3_flag_set(tp, IS_SSB_CORE);
17175                 if (ssb_gige_must_flush_posted_writes(pdev))
17176                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17177                 if (ssb_gige_one_dma_at_once(pdev))
17178                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17179                 if (ssb_gige_have_roboswitch(pdev))
17180                         tg3_flag_set(tp, ROBOSWITCH);
17181                 if (ssb_gige_is_rgmii(pdev))
17182                         tg3_flag_set(tp, RGMII_MODE);
17183         }
17184
17185         /* The word/byte swap controls here control register access byte
17186          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17187          * setting below.
17188          */
17189         tp->misc_host_ctrl =
17190                 MISC_HOST_CTRL_MASK_PCI_INT |
17191                 MISC_HOST_CTRL_WORD_SWAP |
17192                 MISC_HOST_CTRL_INDIR_ACCESS |
17193                 MISC_HOST_CTRL_PCISTATE_RW;
17194
17195         /* The NONFRM (non-frame) byte/word swap controls take effect
17196          * on descriptor entries and anything else that isn't packet data.
17197          *
17198          * The StrongARM chips on the board (one for tx, one for rx)
17199          * are running in big-endian mode.
17200          */
17201         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17202                         GRC_MODE_WSWAP_NONFRM_DATA);
17203 #ifdef __BIG_ENDIAN
17204         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17205 #endif
17206         spin_lock_init(&tp->lock);
17207         spin_lock_init(&tp->indirect_lock);
17208         INIT_WORK(&tp->reset_task, tg3_reset_task);
17209
17210         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17211         if (!tp->regs) {
17212                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17213                 err = -ENOMEM;
17214                 goto err_out_free_dev;
17215         }
17216
17217         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17218             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17219             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17220             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17221             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17222             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17223             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17224             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17225             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17226             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17227             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17228             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17229                 tg3_flag_set(tp, ENABLE_APE);
17230                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17231                 if (!tp->aperegs) {
17232                         dev_err(&pdev->dev,
17233                                 "Cannot map APE registers, aborting\n");
17234                         err = -ENOMEM;
17235                         goto err_out_iounmap;
17236                 }
17237         }
17238
17239         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17240         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17241
17242         dev->ethtool_ops = &tg3_ethtool_ops;
17243         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17244         dev->netdev_ops = &tg3_netdev_ops;
17245         dev->irq = pdev->irq;
17246
17247         err = tg3_get_invariants(tp, ent);
17248         if (err) {
17249                 dev_err(&pdev->dev,
17250                         "Problem fetching invariants of chip, aborting\n");
17251                 goto err_out_apeunmap;
17252         }
17253
17254         /* The EPB bridge inside 5714, 5715, and 5780 and any
17255          * device behind the EPB cannot support DMA addresses > 40-bit.
17256          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17257          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17258          * do DMA address check in tg3_start_xmit().
17259          */
17260         if (tg3_flag(tp, IS_5788))
17261                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17262         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17263                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17264 #ifdef CONFIG_HIGHMEM
17265                 dma_mask = DMA_BIT_MASK(64);
17266 #endif
17267         } else
17268                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17269
17270         /* Configure DMA attributes. */
17271         if (dma_mask > DMA_BIT_MASK(32)) {
17272                 err = pci_set_dma_mask(pdev, dma_mask);
17273                 if (!err) {
17274                         features |= NETIF_F_HIGHDMA;
17275                         err = pci_set_consistent_dma_mask(pdev,
17276                                                           persist_dma_mask);
17277                         if (err < 0) {
17278                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17279                                         "DMA for consistent allocations\n");
17280                                 goto err_out_apeunmap;
17281                         }
17282                 }
17283         }
17284         if (err || dma_mask == DMA_BIT_MASK(32)) {
17285                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17286                 if (err) {
17287                         dev_err(&pdev->dev,
17288                                 "No usable DMA configuration, aborting\n");
17289                         goto err_out_apeunmap;
17290                 }
17291         }
17292
17293         tg3_init_bufmgr_config(tp);
17294
17295         features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17296
17297         /* 5700 B0 chips do not support checksumming correctly due
17298          * to hardware bugs.
17299          */
17300         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17301                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17302
17303                 if (tg3_flag(tp, 5755_PLUS))
17304                         features |= NETIF_F_IPV6_CSUM;
17305         }
17306
17307         /* TSO is on by default on chips that support hardware TSO.
17308          * Firmware TSO on older chips gives lower performance, so it
17309          * is off by default, but can be enabled using ethtool.
17310          */
17311         if ((tg3_flag(tp, HW_TSO_1) ||
17312              tg3_flag(tp, HW_TSO_2) ||
17313              tg3_flag(tp, HW_TSO_3)) &&
17314             (features & NETIF_F_IP_CSUM))
17315                 features |= NETIF_F_TSO;
17316         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17317                 if (features & NETIF_F_IPV6_CSUM)
17318                         features |= NETIF_F_TSO6;
17319                 if (tg3_flag(tp, HW_TSO_3) ||
17320                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17321                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17322                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17323                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17324                     tg3_asic_rev(tp) == ASIC_REV_57780)
17325                         features |= NETIF_F_TSO_ECN;
17326         }
17327
17328         dev->features |= features;
17329         dev->vlan_features |= features;
17330
17331         /*
17332          * Add loopback capability only for a subset of devices that support
17333          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17334          * loopback for the remaining devices.
17335          */
17336         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17337             !tg3_flag(tp, CPMU_PRESENT))
17338                 /* Add the loopback capability */
17339                 features |= NETIF_F_LOOPBACK;
17340
17341         dev->hw_features |= features;
17342
17343         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17344             !tg3_flag(tp, TSO_CAPABLE) &&
17345             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17346                 tg3_flag_set(tp, MAX_RXPEND_64);
17347                 tp->rx_pending = 63;
17348         }
17349
17350         err = tg3_get_device_address(tp);
17351         if (err) {
17352                 dev_err(&pdev->dev,
17353                         "Could not obtain valid ethernet address, aborting\n");
17354                 goto err_out_apeunmap;
17355         }
17356
17357         /*
17358          * Reset the chip in case a UNDI or EFI driver did not shut it
17359          * down cleanly; otherwise the DMA self test will enable WDMAC
17360          * and we'll see (spurious) pending DMA on the PCI bus at that point.
17361          */
17362         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17363             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17364                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17365                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17366         }
17367
17368         err = tg3_test_dma(tp);
17369         if (err) {
17370                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17371                 goto err_out_apeunmap;
17372         }
17373
17374         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17375         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17376         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17377         for (i = 0; i < tp->irq_max; i++) {
17378                 struct tg3_napi *tnapi = &tp->napi[i];
17379
17380                 tnapi->tp = tp;
17381                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17382
17383                 tnapi->int_mbox = intmbx;
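                      /* The first five interrupt mailboxes are spaced 8 bytes
                       * apart; the remaining vectors use 4-byte spacing.
                       */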
17384                 if (i <= 4)
17385                         intmbx += 0x8;
17386                 else
17387                         intmbx += 0x4;
17388
17389                 tnapi->consmbox = rcvmbx;
17390                 tnapi->prodmbox = sndmbx;
17391
17392                 if (i)
17393                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17394                 else
17395                         tnapi->coal_now = HOSTCC_MODE_NOW;
17396
17397                 if (!tg3_flag(tp, SUPPORT_MSIX))
17398                         break;
17399
17400                 /*
17401                  * If we support MSIX, we'll be using RSS.  If we're using
17402                  * RSS, the first vector only handles link interrupts and the
17403                  * remaining vectors handle rx and tx interrupts.  Reuse the
17404                  * mailbox values for the next iteration.  The values we set up
17405                  * above are still useful for single-vector mode.
17406                  */
17407                 if (!i)
17408                         continue;
17409
17410                 rcvmbx += 0x8;
17411
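                      /* The send producer mailboxes are not a simple 32-bit
                       * array; this -0x4/+0xc zig-zag presumably walks them in
                       * the order the hardware lays out the 64-bit registers.
                       */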
17412                 if (sndmbx & 0x4)
17413                         sndmbx -= 0x4;
17414                 else
17415                         sndmbx += 0xc;
17416         }
17417
17418         tg3_init_coal(tp);
17419
17420         pci_set_drvdata(pdev, dev);
17421
17422         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17423             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17424             tg3_asic_rev(tp) == ASIC_REV_5762)
17425                 tg3_flag_set(tp, PTP_CAPABLE);
17426
17427         if (tg3_flag(tp, 5717_PLUS)) {
17428                 /* Resume a low-power mode */
17429                 tg3_frob_aux_power(tp, false);
17430         }
17431
17432         tg3_timer_init(tp);
17433
17434         tg3_carrier_off(tp);
17435
17436         err = register_netdev(dev);
17437         if (err) {
17438                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17439                 goto err_out_apeunmap;
17440         }
17441
17442         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17443                     tp->board_part_number,
17444                     tg3_chip_rev_id(tp),
17445                     tg3_bus_string(tp, str),
17446                     dev->dev_addr);
17447
17448         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17449                 struct phy_device *phydev;
17450                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17451                 netdev_info(dev,
17452                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17453                             phydev->drv->name, dev_name(&phydev->dev));
17454         } else {
17455                 char *ethtype;
17456
17457                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17458                         ethtype = "10/100Base-TX";
17459                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17460                         ethtype = "1000Base-SX";
17461                 else
17462                         ethtype = "10/100/1000Base-T";
17463
17464                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17465                             "(WireSpeed[%d], EEE[%d])\n",
17466                             tg3_phy_string(tp), ethtype,
17467                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17468                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17469         }
17470
17471         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17472                     (dev->features & NETIF_F_RXCSUM) != 0,
17473                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17474                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17475                     tg3_flag(tp, ENABLE_ASF) != 0,
17476                     tg3_flag(tp, TSO_CAPABLE) != 0);
17477         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17478                     tp->dma_rwctrl,
17479                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17480                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17481
17482         pci_save_state(pdev);
17483
17484         return 0;
17485
17486 err_out_apeunmap:
17487         if (tp->aperegs) {
17488                 iounmap(tp->aperegs);
17489                 tp->aperegs = NULL;
17490         }
17491
17492 err_out_iounmap:
17493         if (tp->regs) {
17494                 iounmap(tp->regs);
17495                 tp->regs = NULL;
17496         }
17497
17498 err_out_free_dev:
17499         free_netdev(dev);
17500
17501 err_out_power_down:
17502         pci_set_power_state(pdev, PCI_D3hot);
17503
17504 err_out_free_res:
17505         pci_release_regions(pdev);
17506
17507 err_out_disable_pdev:
17508         pci_disable_device(pdev);
17509         pci_set_drvdata(pdev, NULL);
17510         return err;
17511 }
17512
17513 static void tg3_remove_one(struct pci_dev *pdev)
17514 {
17515         struct net_device *dev = pci_get_drvdata(pdev);
17516
17517         if (dev) {
17518                 struct tg3 *tp = netdev_priv(dev);
17519
17520                 release_firmware(tp->fw);
17521
17522                 tg3_reset_task_cancel(tp);
17523
17524                 if (tg3_flag(tp, USE_PHYLIB)) {
17525                         tg3_phy_fini(tp);
17526                         tg3_mdio_fini(tp);
17527                 }
17528
17529                 unregister_netdev(dev);
17530                 if (tp->aperegs) {
17531                         iounmap(tp->aperegs);
17532                         tp->aperegs = NULL;
17533                 }
17534                 if (tp->regs) {
17535                         iounmap(tp->regs);
17536                         tp->regs = NULL;
17537                 }
17538                 free_netdev(dev);
17539                 pci_release_regions(pdev);
17540                 pci_disable_device(pdev);
17541                 pci_set_drvdata(pdev, NULL);
17542         }
17543 }
17544
17545 #ifdef CONFIG_PM_SLEEP
17546 static int tg3_suspend(struct device *device)
17547 {
17548         struct pci_dev *pdev = to_pci_dev(device);
17549         struct net_device *dev = pci_get_drvdata(pdev);
17550         struct tg3 *tp = netdev_priv(dev);
17551         int err;
17552
17553         if (!netif_running(dev))
17554                 return 0;
17555
17556         tg3_reset_task_cancel(tp);
17557         tg3_phy_stop(tp);
17558         tg3_netif_stop(tp);
17559
17560         tg3_timer_stop(tp);
17561
17562         tg3_full_lock(tp, 1);
17563         tg3_disable_ints(tp);
17564         tg3_full_unlock(tp);
17565
17566         netif_device_detach(dev);
17567
17568         tg3_full_lock(tp, 0);
17569         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17570         tg3_flag_clear(tp, INIT_COMPLETE);
17571         tg3_full_unlock(tp);
17572
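              /* Power-down preparation failed; bring the hardware back up so
               * the interface stays usable, then report the original error.
               */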
17573         err = tg3_power_down_prepare(tp);
17574         if (err) {
17575                 int err2;
17576
17577                 tg3_full_lock(tp, 0);
17578
17579                 tg3_flag_set(tp, INIT_COMPLETE);
17580                 err2 = tg3_restart_hw(tp, true);
17581                 if (err2)
17582                         goto out;
17583
17584                 tg3_timer_start(tp);
17585
17586                 netif_device_attach(dev);
17587                 tg3_netif_start(tp);
17588
17589 out:
17590                 tg3_full_unlock(tp);
17591
17592                 if (!err2)
17593                         tg3_phy_start(tp);
17594         }
17595
17596         return err;
17597 }
17598
17599 static int tg3_resume(struct device *device)
17600 {
17601         struct pci_dev *pdev = to_pci_dev(device);
17602         struct net_device *dev = pci_get_drvdata(pdev);
17603         struct tg3 *tp = netdev_priv(dev);
17604         int err;
17605
17606         if (!netif_running(dev))
17607                 return 0;
17608
17609         netif_device_attach(dev);
17610
17611         tg3_full_lock(tp, 0);
17612
17613         tg3_flag_set(tp, INIT_COMPLETE);
17614         err = tg3_restart_hw(tp,
17615                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17616         if (err)
17617                 goto out;
17618
17619         tg3_timer_start(tp);
17620
17621         tg3_netif_start(tp);
17622
17623 out:
17624         tg3_full_unlock(tp);
17625
17626         if (!err)
17627                 tg3_phy_start(tp);
17628
17629         return err;
17630 }
17631 #endif /* CONFIG_PM_SLEEP */
17632
17633 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17634
17635 /**
17636  * tg3_io_error_detected - called when PCI error is detected
17637  * @pdev: Pointer to PCI device
17638  * @state: The current pci connection state
17639  *
17640  * This function is called after a PCI bus error affecting
17641  * this device has been detected.
17642  */
17643 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17644                                               pci_channel_state_t state)
17645 {
17646         struct net_device *netdev = pci_get_drvdata(pdev);
17647         struct tg3 *tp = netdev_priv(netdev);
17648         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17649
17650         netdev_info(netdev, "PCI I/O error detected\n");
17651
17652         rtnl_lock();
17653
17654         if (!netif_running(netdev))
17655                 goto done;
17656
17657         tg3_phy_stop(tp);
17658
17659         tg3_netif_stop(tp);
17660
17661         tg3_timer_stop(tp);
17662
17663         /* Want to make sure that the reset task doesn't run */
17664         tg3_reset_task_cancel(tp);
17665
17666         netif_device_detach(netdev);
17667
17668         /* Clean up software state, even if MMIO is blocked */
17669         tg3_full_lock(tp, 0);
17670         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17671         tg3_full_unlock(tp);
17672
17673 done:
17674         if (state == pci_channel_io_perm_failure)
17675                 err = PCI_ERS_RESULT_DISCONNECT;
17676         else
17677                 pci_disable_device(pdev);
17678
17679         rtnl_unlock();
17680
17681         return err;
17682 }
17683
17684 /**
17685  * tg3_io_slot_reset - called after the pci bus has been reset.
17686  * @pdev: Pointer to PCI device
17687  *
17688  * Restart the card from scratch, as if from a cold-boot.
17689  * At this point, the card has experienced a hard reset,
17690  * followed by fixups by BIOS, and has its config space
17691  * set up identically to what it was at cold boot.
17692  */
17693 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17694 {
17695         struct net_device *netdev = pci_get_drvdata(pdev);
17696         struct tg3 *tp = netdev_priv(netdev);
17697         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17698         int err;
17699
17700         rtnl_lock();
17701
17702         if (pci_enable_device(pdev)) {
17703                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17704                 goto done;
17705         }
17706
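              /* Restore the config space saved at probe time, then re-save it
               * so a later reset starts from this known-good state.
               */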
17707         pci_set_master(pdev);
17708         pci_restore_state(pdev);
17709         pci_save_state(pdev);
17710
17711         if (!netif_running(netdev)) {
17712                 rc = PCI_ERS_RESULT_RECOVERED;
17713                 goto done;
17714         }
17715
17716         err = tg3_power_up(tp);
17717         if (err)
17718                 goto done;
17719
17720         rc = PCI_ERS_RESULT_RECOVERED;
17721
17722 done:
17723         rtnl_unlock();
17724
17725         return rc;
17726 }
17727
17728 /**
17729  * tg3_io_resume - called when traffic can start flowing again.
17730  * @pdev: Pointer to PCI device
17731  *
17732  * This callback is called when the error recovery driver tells
17733  * us that it's OK to resume normal operation.
17734  */
17735 static void tg3_io_resume(struct pci_dev *pdev)
17736 {
17737         struct net_device *netdev = pci_get_drvdata(pdev);
17738         struct tg3 *tp = netdev_priv(netdev);
17739         int err;
17740
17741         rtnl_lock();
17742
17743         if (!netif_running(netdev))
17744                 goto done;
17745
17746         tg3_full_lock(tp, 0);
17747         tg3_flag_set(tp, INIT_COMPLETE);
17748         err = tg3_restart_hw(tp, true);
17749         if (err) {
17750                 tg3_full_unlock(tp);
17751                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17752                 goto done;
17753         }
17754
17755         netif_device_attach(netdev);
17756
17757         tg3_timer_start(tp);
17758
17759         tg3_netif_start(tp);
17760
17761         tg3_full_unlock(tp);
17762
17763         tg3_phy_start(tp);
17764
17765 done:
17766         rtnl_unlock();
17767 }
17768
17769 static const struct pci_error_handlers tg3_err_handler = {
17770         .error_detected = tg3_io_error_detected,
17771         .slot_reset     = tg3_io_slot_reset,
17772         .resume         = tg3_io_resume
17773 };
17774
17775 static struct pci_driver tg3_driver = {
17776         .name           = DRV_MODULE_NAME,
17777         .id_table       = tg3_pci_tbl,
17778         .probe          = tg3_init_one,
17779         .remove         = tg3_remove_one,
17780         .err_handler    = &tg3_err_handler,
17781         .driver.pm      = &tg3_pm_ops,
17782 };
17783
17784 static int __init tg3_init(void)
17785 {
17786         return pci_register_driver(&tg3_driver);
17787 }
17788
17789 static void __exit tg3_cleanup(void)
17790 {
17791         pci_unregister_driver(&tg3_driver);
17792 }
17793
17794 module_init(tg3_init);
17795 module_exit(tg3_cleanup);