drivers/net/forcedeth.c
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080

#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

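/*
 * The per-version counts below are derived from the layout of
 * struct nv_ethtool_stats: V3 covers every u64 field, V2 omits the
 * three version-3 fields, and V1 additionally omits the six
 * version-2 fields.
 */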
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3]; /* -rx */
	char name_tx[IFNAMSIZ + 3]; /* -tx */
	char name_other[IFNAMSIZ + 6]; /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

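/*
 * True when the 64-bit (DESC_VER_3) descriptor format is in use,
 * i.e. when the driver's "optimized" tx/rx paths apply.
 */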
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

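/*
 * Busy-wait until (register & mask) == target, polling every 'delay'
 * usec for at most 'delaymax' usec. Returns 0 on success, 1 on
 * timeout (after optionally printing 'msg').
 */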
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
			int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

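/*
 * Program the rx/tx ring base addresses. Both rings share one DMA
 * allocation (rx first, tx right after), so the tx base is the ring
 * address plus rx_ring_size descriptors; the high 32 address bits
 * are written only for the extended (ring_desc_ex) layout.
 */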
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

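/*
 * True only when MSI-X is enabled with more than one vector; with
 * legacy/MSI interrupts, or MSI-X using a single vector, everything
 * is serviced through one irq.
 */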
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

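/*
 * Gate or ungate the tx/rx clocks via NvRegPowerState2, but only on
 * hardware with power control and only while the management unit
 * does not own the mac.
 */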
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
#endif
}

static void nv_napi_disable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#endif
}

#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}

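/*
 * Assert BMCR_RESET (together with any caller-supplied bmcr bits)
 * and poll until the phy deasserts it. Returns 0 on success, -1 on
 * failure.
 */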
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

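/*
 * Bring the phy into a known state: apply vendor errata, program the
 * autoneg advertisement, reset the phy and restart autonegotiation.
 * Returns 0 on success or PHY_ERROR.
 */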
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down) {
		mii_control |= BMCR_PDOWN;
	}
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

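/*
 * rx/tx start/stop helpers. When the management unit owns the mac
 * (np->mac_in_use), the start/stop bits are left alone and only the
 * host-controlled rx/tx path enable bits are toggled.
 */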
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

1573static void nv_stop_tx(struct net_device *dev)
1574{
f35723ec 1575 struct fe_priv *np = netdev_priv(dev);
1da177e4 1576 u8 __iomem *base = get_hwbase(dev);
f35723ec 1577 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1da177e4
LT
1578
1579 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
f35723ec
AA
1580 if (!np->mac_in_use)
1581 tx_ctrl &= ~NVREG_XMITCTL_START;
1582 else
1583 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1584 writel(tx_ctrl, base + NvRegTransmitterControl);
1da177e4
LT
1585 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1586 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1587 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1588
1589 udelay(NV_TXSTOP_DELAY2);
f35723ec
AA
1590 if (!np->mac_in_use)
1591 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1592 base + NvRegTransmitPoll);
1da177e4
LT
1593}
1594
36b30ea9
JG
1595static void nv_start_rxtx(struct net_device *dev)
1596{
1597 nv_start_rx(dev);
1598 nv_start_tx(dev);
1599}
1600
1601static void nv_stop_rxtx(struct net_device *dev)
1602{
1603 nv_stop_rx(dev);
1604 nv_stop_tx(dev);
1605}
1606
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

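/*
 * Note: the "+=" accumulation above assumes the hardware counters are
 * read-and-clear, so that each readl() drains the current on-chip
 * count into the running software copy in np->estats.
 */
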
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

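/*
 * Note on the less_rx guard used by both refill loops above: filling
 * stops one descriptor short of get_rx, so a completely full ring can
 * never look identical to a completely empty one (put == get).
 */
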
/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

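/*
 * Worked example for the slot arithmetic above (hypothetical values):
 * with tx_ring_size = 16 and put_tx_ctx 5 entries ahead of get_tx_ctx,
 *
 *	16 - ((16 + 5) % 16) == 16 - 5 == 11
 *
 * slots are free. Adding tx_ring_size before the modulo keeps the
 * argument positive when put_tx_ctx has wrapped below get_tx_ctx.
 */
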
static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};

static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We gather three random 12 bit values and swizzle
	   bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds cannot be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}

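/*
 * Sketch of the miniseed swizzle above with an example value: for
 * miniseed2 = 0xabc the top and bottom nibbles of the 12 bit value are
 * exchanged while the middle nibble stays in place:
 *
 *	((0xabc & 0xF00) >> 8) | (0xabc & 0x0F0) | ((0xabc & 0x00F) << 8)
 *		== 0x00a | 0x0b0 | 0xc00 == 0xcba
 */
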
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

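/*
 * Worked example for the "entries" computation above (and repeated in
 * nv_start_xmit_optimized below), assuming NV_TX2_TSO_MAX_SHIFT is 14
 * so that NV_TX2_TSO_MAX_SIZE is 16384: a 40000 byte buffer needs
 *
 *	(40000 >> 14) + ((40000 & 16383) ? 1 : 0) == 2 + 1 == 3
 *
 * descriptors, i.e. the plain ceiling of 40000 / 16384.
 */
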
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	struct nv_skb_map* start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}

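/*
 * How the tx_limit scheme above fits together with
 * nv_start_xmit_optimized(): once NV_TX_LIMIT_COUNT packets are in
 * flight, further packets are queued with the VALID bit cleared so the
 * hardware ignores them. Each completion then hands exactly one
 * deferred packet back by setting VALID on its first descriptor and
 * kicking the transmitter.
 */
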
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0;	/* prevent giving HW any limited pkts */
	np->tx_stop = 0;	/* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off the
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}

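/*
 * Worked example for nv_getlen() (hypothetical values): a plain
 * ethernet frame arrives with datalen = 80 while its length field says
 * 60. Since 60 does not exceed ETH_DATA_LEN it is a length, protolen
 * becomes 60 + ETH_HLEN = 74, and the 6 trailing padding bytes are
 * trimmed by returning 74. Had the header claimed more than the 80
 * bytes actually on the wire, the packet would be discarded with -1.
 */
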
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

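/*
 * Example (assuming NV_RX_HEADERS is 64): the default MTU of 1500
 * keeps rx_buf_sz at ETH_DATA_LEN + 64 = 1564, while a jumbo MTU of
 * 9000 yields 9064.
 */
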
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx/tx queues */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx and tx engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

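/*
 * Example of the little-endian packing above for the hypothetical
 * address 00:11:22:33:44:55:
 *
 *	NvRegMacAddrA = 0x33221100	(bytes 0..3)
 *	NvRegMacAddrB = 0x00005544	(bytes 4..5)
 */
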
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}

c7985051 3143static void nv_update_pause(struct net_device *dev, u32 pause_flags)
b6d0773f
AA
3144{
3145 struct fe_priv *np = netdev_priv(dev);
3146 u8 __iomem *base = get_hwbase(dev);
3147
3148 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3149
3150 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3151 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3152 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3153 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3154 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3155 } else {
3156 writel(pff, base + NvRegPacketFilterFlags);
3157 }
3158 }
3159 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3160 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3161 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
5289b4c4
AA
3162 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3163 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3164 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
9a33e883 3165 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
5289b4c4 3166 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
9a33e883
AA
3167 /* limit the number of tx pause frames to a default of 8 */
3168 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3169 }
5289b4c4 3170 writel(pause_enable, base + NvRegTxPauseFrame);
b6d0773f
AA
3171 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3172 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3173 } else {
3174 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3175 writel(regmisc, base + NvRegMisc1);
3176 }
3177 }
3178}
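
/*
 * Note on the version checks above: the DEV_HAS_PAUSEFRAME_TX_V2/V3
 * capability flags are per-chip, and each later check overwrites
 * pause_enable, so the newest tx pause format the chip supports wins;
 * V3 additionally caps the number of pause frames sent in a burst.
 */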

/**
 * nv_update_linkspeed: Set up the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check that auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
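
/*
 * Example of the adv_lpa intersection above (illustrative register
 * values): with adv = 0x01e1 (10/100 half+full advertised) and
 * lpa = 0x45e1 from the partner, adv_lpa = 0x01e1; LPA_100FULL (0x0100)
 * is set, so the link resolves to 100 MBit full duplex. A mode is only
 * chosen when both sides advertise it.
 */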

static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static void nv_msi_workaround(struct fe_priv *np)
{
	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}

static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
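
/*
 * The dynamic mode above is a simple hysteresis: one busy pass (more
 * than NV_DYNAMIC_THRESHOLD units of work) switches to the timer/poll
 * irq mask immediately, while switching back to per-packet interrupts
 * requires NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet passes, so the
 * mask does not flap under bursty load. A nonzero return tells the
 * caller to rewrite NvRegIrqMask.
 */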

static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	if (napi_schedule_prep(&np->napi)) {
		/* disable further irqs (msix not enabled with napi) */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}
#else
	do {
		int work = 0;
		if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	} while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}
#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_HANDLED;
}

/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and fewer memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	if (napi_schedule_prep(&np->napi)) {
		/* disable further irqs (msix not enabled with napi) */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}
#else
	do {
		int work = 0;
		if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	} while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}
#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_HANDLED;
}

static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}

#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int tx_work, rx_work;

	if (!nv_optimized(np)) {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
#endif

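/*
 * NAPI contract reminder for the poll function above: it must return
 * the amount of rx work done; only when rx_work < budget may it call
 * napi_complete() and unmask the device interrupt, otherwise the core
 * will poll again without waiting for a new interrupt.
 */
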
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
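
/*
 * Mapping example (illustrative values): set_msix_vector_map(dev, 1,
 * 0x00f0) covers interrupt bits 4-7, so nibbles 4-7 of msixmap are set
 * to the vector and 0x11110000 is ORed into NvRegMSIXMap0. Each 4-bit
 * nibble of a map register names the MSI-X vector that services the
 * corresponding interrupt bit.
 */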

static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						&nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						&nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						&nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
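
/*
 * Setup order in nv_request_irq: MSI-X first (split rx/tx/other vectors
 * in throughput mode, one shared vector otherwise), then plain MSI,
 * then the legacy INTx line. ret stays nonzero at each stage until an
 * enable succeeds, which is what drives the fallback chain.
 */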

static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable the irq line(s), then reenable the interrupt mask
	 * on the nic; this must happen before calling nv_nic_irq because
	 * that may decide to do otherwise.
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx/tx queues */
			nv_drain_rxtx(dev);
			/* reinit driver view of the queues */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the queues */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx/tx engines */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
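
/*
 * Only magic-packet wake is supported, so from userspace this maps to
 * "ethtool -s eth0 wol g" to arm it and "ethtool -s eth0 wol d" to
 * disarm (eth0 standing in for whatever name the interface has).
 */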

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: with autonegotiation disabled, speed 1000 is
		 * intentionally forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
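
/*
 * Typical userspace counterparts for the two paths above (commands are
 * illustrative): "ethtool -s eth0 autoneg on advertise 0x0f"
 * renegotiates 10/100 half+full, while "ethtool -s eth0 autoneg off
 * speed 100 duplex full" takes the forced-mode path; gigabit is only
 * reachable via autoneg, as enforced above.
 */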

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
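
/*
 * The dump buffer is sized by nv_get_regs_len(), so the copy loop must
 * stay strictly below register_size/sizeof(u32) words; userspace
 * fetches the dump with "ethtool -d eth0" and decodes the raw words by
 * register offset.
 */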

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}

static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
			&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
			&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					rxtx_ring, ring_addr);
		}
		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
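
/*
 * Ring resizing is driven from userspace via e.g.
 * "ethtool -G eth0 rx 512 tx 512". Note that the rx/tx descriptor pair
 * lives in one DMA allocation and is allocated before the device is
 * stopped, so an allocation failure leaves the old rings in place and
 * returns -ENOMEM without disturbing the running device.
 */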
4721
b6d0773f
AA
4722static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4723{
4724 struct fe_priv *np = netdev_priv(dev);
4725
4726 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4727 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4728 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4729}
4730
4731static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4732{
4733 struct fe_priv *np = netdev_priv(dev);
4734 int adv, bmcr;
4735
4736 if ((!np->autoneg && np->duplex == 0) ||
4737 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4738 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
4739 dev->name);
4740 return -EINVAL;
4741 }
4742 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4743 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4744 return -EINVAL;
4745 }
4746
4747 netif_carrier_off(dev);
4748 if (netif_running(dev)) {
4749 nv_disable_irq(dev);
58dfd9c1 4750 netif_tx_lock_bh(dev);
e308a5d8 4751 netif_addr_lock(dev);
b6d0773f
AA
4752 spin_lock(&np->lock);
4753 /* stop engines */
36b30ea9 4754 nv_stop_rxtx(dev);
b6d0773f 4755 spin_unlock(&np->lock);
e308a5d8 4756 netif_addr_unlock(dev);
58dfd9c1 4757 netif_tx_unlock_bh(dev);
b6d0773f
AA
4758 }
4759
4760 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4761 if (pause->rx_pause)
4762 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4763 if (pause->tx_pause)
4764 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4765
4766 if (np->autoneg && pause->autoneg) {
4767 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4768
4769 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4770 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4771	if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4772 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4773 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4774 adv |= ADVERTISE_PAUSE_ASYM;
4775 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4776
4777 if (netif_running(dev))
4778 printk(KERN_INFO "%s: link down.\n", dev->name);
4779 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4780 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4781 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4782 } else {
4783 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4784 if (pause->rx_pause)
4785 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4786 if (pause->tx_pause)
4787 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4788
4789 if (!netif_running(dev))
4790 nv_update_linkspeed(dev);
4791 else
4792 nv_update_pause(dev, np->pause_flags);
4793 }
4794
4795 if (netif_running(dev)) {
36b30ea9 4796 nv_start_rxtx(dev);
b6d0773f
AA
4797 nv_enable_irq(dev);
4798 }
4799 return 0;
4800}
4801
5ed2616f
AA
4802static u32 nv_get_rx_csum(struct net_device *dev)
4803{
4804 struct fe_priv *np = netdev_priv(dev);
f2ad2d9b 4805	return np->rx_csum != 0;
5ed2616f
AA
4806}
4807
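/* Hardware vlan stripping depends on the receive checksum unit, so the
 * NVREG_TXRXCTL_RXCHECK bit is only cleared below when vlan acceleration
 * is not enabled; with vlan active, rx checksumming stays on in hardware
 * even though np->rx_csum is reported as off.
 */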
4808static int nv_set_rx_csum(struct net_device *dev, u32 data)
4809{
4810 struct fe_priv *np = netdev_priv(dev);
4811 u8 __iomem *base = get_hwbase(dev);
4812 int retcode = 0;
4813
4814 if (np->driver_data & DEV_HAS_CHECKSUM) {
5ed2616f 4815 if (data) {
f2ad2d9b 4816 np->rx_csum = 1;
5ed2616f 4817 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5ed2616f 4818 } else {
f2ad2d9b
AA
4819 np->rx_csum = 0;
4820 /* vlan is dependent on rx checksum offload */
4821 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4822 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
5ed2616f 4823 }
5ed2616f
AA
4824 if (netif_running(dev)) {
4825 spin_lock_irq(&np->lock);
4826 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4827 spin_unlock_irq(&np->lock);
4828 }
4829 } else {
4830 return -EINVAL;
4831 }
4832
4833 return retcode;
4834}
4835
4836static int nv_set_tx_csum(struct net_device *dev, u32 data)
4837{
4838 struct fe_priv *np = netdev_priv(dev);
4839
4840 if (np->driver_data & DEV_HAS_CHECKSUM)
c1086cda 4841 return ethtool_op_set_tx_csum(dev, data);
5ed2616f
AA
4842 else
4843 return -EOPNOTSUPP;
4844}
4845
4846static int nv_set_sg(struct net_device *dev, u32 data)
4847{
4848 struct fe_priv *np = netdev_priv(dev);
4849
4850 if (np->driver_data & DEV_HAS_CHECKSUM)
4851 return ethtool_op_set_sg(dev, data);
4852 else
4853 return -EOPNOTSUPP;
4854}
4855
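/* nv_get_sset_count() sizes both the selftest and statistics arrays; the
 * statistics count is selected by feature flag so v3, v2 and v1 devices
 * each expose exactly the counters their hardware maintains.
 * nv_get_ethtool_stats() below copies that many u64 values from np->estats.
 */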
b9f2c044 4856static int nv_get_sset_count(struct net_device *dev, int sset)
52da3578
AA
4857{
4858 struct fe_priv *np = netdev_priv(dev);
4859
b9f2c044
JG
4860 switch (sset) {
4861 case ETH_SS_TEST:
4862 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4863 return NV_TEST_COUNT_EXTENDED;
4864 else
4865 return NV_TEST_COUNT_BASE;
4866 case ETH_SS_STATS:
8ed1454a
AA
4867 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4868 return NV_DEV_STATISTICS_V3_COUNT;
b9f2c044
JG
4869 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4870 return NV_DEV_STATISTICS_V2_COUNT;
8ed1454a
AA
4871 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4872 return NV_DEV_STATISTICS_V1_COUNT;
b9f2c044
JG
4873 else
4874 return 0;
4875 default:
4876 return -EOPNOTSUPP;
4877 }
52da3578
AA
4878}
4879
4880static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4881{
4882 struct fe_priv *np = netdev_priv(dev);
4883
4884 /* update stats */
4885 nv_do_stats_poll((unsigned long)dev);
4886
b9f2c044 4887 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
9589c77a
AA
4888}
4889
4890static int nv_link_test(struct net_device *dev)
4891{
4892 struct fe_priv *np = netdev_priv(dev);
4893 int mii_status;
4894
4895	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); /* BMSR link bit is latched low; read twice for the current state */
4896 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4897
4898 /* check phy link status */
4899 if (!(mii_status & BMSR_LSTATUS))
4900 return 0;
4901 else
4902 return 1;
4903}
4904
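/* nv_register_test() uses the classic toggle-and-readback technique: for
 * each entry in nv_registers_test[] it xors the writable mask into the
 * current value, writes it back, and checks that exactly the masked bits
 * changed before restoring the original contents. A zero .reg terminates
 * the table.
 */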
4905static int nv_register_test(struct net_device *dev)
4906{
4907 u8 __iomem *base = get_hwbase(dev);
4908 int i = 0;
4909 u32 orig_read, new_read;
4910
4911 do {
4912 orig_read = readl(base + nv_registers_test[i].reg);
4913
4914 /* xor with mask to toggle bits */
4915 orig_read ^= nv_registers_test[i].mask;
4916
4917 writel(orig_read, base + nv_registers_test[i].reg);
4918
4919 new_read = readl(base + nv_registers_test[i].reg);
4920
4921 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4922 return 0;
4923
4924 /* restore original value */
4925 orig_read ^= nv_registers_test[i].mask;
4926 writel(orig_read, base + nv_registers_test[i].reg);
4927
4928 } while (nv_registers_test[++i].reg != 0);
4929
4930 return 1;
4931}
4932
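/* nv_interrupt_test() temporarily drops to a single interrupt vector,
 * enables only the hardware timer as a source and waits 100ms for the ISR
 * to set np->intr_test. Return values: 1 = pass, 2 = no interrupt seen,
 * 0 = the test irq could not be set up (the caller bails out in that case,
 * see nv_self_test()).
 */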
4933static int nv_interrupt_test(struct net_device *dev)
4934{
4935 struct fe_priv *np = netdev_priv(dev);
4936 u8 __iomem *base = get_hwbase(dev);
4937 int ret = 1;
4938 int testcnt;
4939 u32 save_msi_flags, save_poll_interval = 0;
4940
4941 if (netif_running(dev)) {
4942 /* free current irq */
4943 nv_free_irq(dev);
4944 save_poll_interval = readl(base+NvRegPollingInterval);
4945 }
4946
4947 /* flag to test interrupt handler */
4948 np->intr_test = 0;
4949
4950 /* setup test irq */
4951 save_msi_flags = np->msi_flags;
4952 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4953 np->msi_flags |= 0x001; /* setup 1 vector */
4954 if (nv_request_irq(dev, 1))
4955 return 0;
4956
4957 /* setup timer interrupt */
4958 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4959 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4960
4961 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4962
4963 /* wait for at least one interrupt */
4964 msleep(100);
4965
4966 spin_lock_irq(&np->lock);
4967
4968 /* flag should be set within ISR */
4969 testcnt = np->intr_test;
4970 if (!testcnt)
4971 ret = 2;
4972
4973 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4974 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4975 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4976 else
4977 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4978
4979 spin_unlock_irq(&np->lock);
4980
4981 nv_free_irq(dev);
4982
4983 np->msi_flags = save_msi_flags;
4984
4985 if (netif_running(dev)) {
4986 writel(save_poll_interval, base + NvRegPollingInterval);
4987 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4988 /* restore original irq */
4989 if (nv_request_irq(dev, 0))
4990 return 0;
4991 }
4992
4993 return ret;
4994}
4995
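/* nv_loopback_test() puts the MAC into loopback via NVREG_PFF_LOOPBACK,
 * transmits a single ETH_DATA_LEN frame filled with the byte pattern
 * (i & 0xff), and then checks that descriptor 0 of the rx ring holds an
 * error-free frame of the same length carrying the same pattern.
 */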
4996static int nv_loopback_test(struct net_device *dev)
4997{
4998 struct fe_priv *np = netdev_priv(dev);
4999 u8 __iomem *base = get_hwbase(dev);
5000 struct sk_buff *tx_skb, *rx_skb;
5001 dma_addr_t test_dma_addr;
5002 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
f82a9352 5003 u32 flags;
9589c77a
AA
5004 int len, i, pkt_len;
5005 u8 *pkt_data;
5006 u32 filter_flags = 0;
5007 u32 misc1_flags = 0;
5008 int ret = 1;
5009
5010 if (netif_running(dev)) {
5011 nv_disable_irq(dev);
5012 filter_flags = readl(base + NvRegPacketFilterFlags);
5013 misc1_flags = readl(base + NvRegMisc1);
5014 } else {
5015 nv_txrx_reset(dev);
5016 }
5017
5018 /* reinit driver view of the rx queue */
5019 set_bufsize(dev);
5020 nv_init_ring(dev);
5021
5022 /* setup hardware for loopback */
5023 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5024 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5025
5026 /* reinit nic view of the rx queue */
5027 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5028 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5029	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5030 base + NvRegRingSizes);
5031 pci_push(base);
5032
5033 /* restart rx engine */
36b30ea9 5034 nv_start_rxtx(dev);
9589c77a
AA
5035
5036 /* setup packet for tx */
5037 pkt_len = ETH_DATA_LEN;
5038 tx_skb = dev_alloc_skb(pkt_len);
46798c89
JJ
5039 if (!tx_skb) {
5040 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
5041 " of %s\n", dev->name);
5042 ret = 0;
5043 goto out;
5044 }
8b5be268
ACM
5045 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
5046 skb_tailroom(tx_skb),
5047	PCI_DMA_TODEVICE); /* tx buffer is read by the nic; matches the unmap direction below */
9589c77a
AA
5048 pkt_data = skb_put(tx_skb, pkt_len);
5049 for (i = 0; i < pkt_len; i++)
5050 pkt_data[i] = (u8)(i & 0xff);
9589c77a 5051
36b30ea9 5052 if (!nv_optimized(np)) {
f82a9352
SH
5053 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5054 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
9589c77a 5055 } else {
5bb7ea26
AV
5056 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5057 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
f82a9352 5058 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
9589c77a
AA
5059 }
5060 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5061 pci_push(get_hwbase(dev));
5062
5063 msleep(500);
5064
5065 /* check for rx of the packet */
36b30ea9 5066 if (!nv_optimized(np)) {
f82a9352 5067 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
9589c77a
AA
5068 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5069
5070 } else {
f82a9352 5071 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
9589c77a
AA
5072 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5073 }
5074
f82a9352 5075 if (flags & NV_RX_AVAIL) {
9589c77a
AA
5076 ret = 0;
5077 } else if (np->desc_ver == DESC_VER_1) {
f82a9352 5078 if (flags & NV_RX_ERROR)
9589c77a
AA
5079 ret = 0;
5080 } else {
f82a9352 5081 if (flags & NV_RX2_ERROR) {
9589c77a
AA
5082 ret = 0;
5083 }
5084 }
5085
5086 if (ret) {
5087 if (len != pkt_len) {
5088 ret = 0;
5089 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
5090 dev->name, len, pkt_len);
5091 } else {
761fcd9e 5092 rx_skb = np->rx_skb[0].skb;
9589c77a
AA
5093 for (i = 0; i < pkt_len; i++) {
5094 if (rx_skb->data[i] != (u8)(i & 0xff)) {
5095 ret = 0;
5096 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
5097 dev->name, i);
5098 break;
5099 }
5100 }
5101 }
5102 } else {
5103 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5104 }
5105
73a37079 5106 pci_unmap_single(np->pci_dev, test_dma_addr,
4305b541 5107 (skb_end_pointer(tx_skb) - tx_skb->data),
9589c77a
AA
5108 PCI_DMA_TODEVICE);
5109 dev_kfree_skb_any(tx_skb);
46798c89 5110 out:
9589c77a 5111 /* stop engines */
36b30ea9 5112 nv_stop_rxtx(dev);
9589c77a
AA
5113 nv_txrx_reset(dev);
5114 /* drain rx queue */
36b30ea9 5115 nv_drain_rxtx(dev);
9589c77a
AA
5116
5117 if (netif_running(dev)) {
5118 writel(misc1_flags, base + NvRegMisc1);
5119 writel(filter_flags, base + NvRegPacketFilterFlags);
5120 nv_enable_irq(dev);
5121 }
5122
5123 return ret;
5124}
5125
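/* ethtool self-test entry point ("ethtool -t eth0", or "ethtool -t eth0
 * offline" for the full set). buffer[0..3] report the link, register,
 * interrupt and loopback results respectively; the offline tests quiesce
 * the nic first and rebuild the rings afterwards.
 */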
5126static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5127{
5128 struct fe_priv *np = netdev_priv(dev);
5129 u8 __iomem *base = get_hwbase(dev);
5130 int result;
b9f2c044 5131 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
9589c77a
AA
5132
5133 if (!nv_link_test(dev)) {
5134 test->flags |= ETH_TEST_FL_FAILED;
5135 buffer[0] = 1;
5136 }
5137
5138 if (test->flags & ETH_TEST_FL_OFFLINE) {
5139 if (netif_running(dev)) {
5140 netif_stop_queue(dev);
08d93575 5141 nv_napi_disable(dev);
58dfd9c1 5142 netif_tx_lock_bh(dev);
e308a5d8 5143 netif_addr_lock(dev);
9589c77a
AA
5144 spin_lock_irq(&np->lock);
5145 nv_disable_hw_interrupts(dev, np->irqmask);
5146 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5147 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5148 } else {
5149 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5150 }
5151 /* stop engines */
36b30ea9 5152 nv_stop_rxtx(dev);
9589c77a
AA
5153 nv_txrx_reset(dev);
5154 /* drain rx queue */
36b30ea9 5155 nv_drain_rxtx(dev);
9589c77a 5156 spin_unlock_irq(&np->lock);
e308a5d8 5157 netif_addr_unlock(dev);
58dfd9c1 5158 netif_tx_unlock_bh(dev);
9589c77a
AA
5159 }
5160
5161 if (!nv_register_test(dev)) {
5162 test->flags |= ETH_TEST_FL_FAILED;
5163 buffer[1] = 1;
5164 }
5165
5166 result = nv_interrupt_test(dev);
5167 if (result != 1) {
5168 test->flags |= ETH_TEST_FL_FAILED;
5169 buffer[2] = 1;
5170 }
5171 if (result == 0) {
5172 /* bail out */
5173 return;
5174 }
5175
5176 if (!nv_loopback_test(dev)) {
5177 test->flags |= ETH_TEST_FL_FAILED;
5178 buffer[3] = 1;
5179 }
5180
5181 if (netif_running(dev)) {
5182 /* reinit driver view of the rx queue */
5183 set_bufsize(dev);
5184 if (nv_init_ring(dev)) {
5185 if (!np->in_shutdown)
5186 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5187 }
5188 /* reinit nic view of the rx queue */
5189 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5190 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5191	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5192 base + NvRegRingSizes);
5193 pci_push(base);
5194 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5195 pci_push(base);
5196 /* restart rx engine */
36b30ea9 5197 nv_start_rxtx(dev);
9589c77a 5198 netif_start_queue(dev);
08d93575 5199 nv_napi_enable(dev);
9589c77a
AA
5200 nv_enable_hw_interrupts(dev, np->irqmask);
5201 }
5202 }
5203}
5204
52da3578
AA
5205static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5206{
5207 switch (stringset) {
5208 case ETH_SS_STATS:
b9f2c044 5209 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
52da3578 5210 break;
9589c77a 5211 case ETH_SS_TEST:
b9f2c044 5212 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
9589c77a 5213 break;
52da3578
AA
5214 }
5215}
5216
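/* ethtool_ops table; attached to the net_device in nv_probe() via
 * SET_ETHTOOL_OPS(dev, &ops).
 */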
7282d491 5217static const struct ethtool_ops ops = {
1da177e4
LT
5218 .get_drvinfo = nv_get_drvinfo,
5219 .get_link = ethtool_op_get_link,
5220 .get_wol = nv_get_wol,
5221 .set_wol = nv_set_wol,
5222 .get_settings = nv_get_settings,
5223 .set_settings = nv_set_settings,
dc8216c1
MS
5224 .get_regs_len = nv_get_regs_len,
5225 .get_regs = nv_get_regs,
5226 .nway_reset = nv_nway_reset,
6a78814f 5227 .set_tso = nv_set_tso,
eafa59f6
AA
5228 .get_ringparam = nv_get_ringparam,
5229 .set_ringparam = nv_set_ringparam,
b6d0773f
AA
5230 .get_pauseparam = nv_get_pauseparam,
5231 .set_pauseparam = nv_set_pauseparam,
5ed2616f
AA
5232 .get_rx_csum = nv_get_rx_csum,
5233 .set_rx_csum = nv_set_rx_csum,
5ed2616f 5234 .set_tx_csum = nv_set_tx_csum,
5ed2616f 5235 .set_sg = nv_set_sg,
52da3578 5236 .get_strings = nv_get_strings,
52da3578 5237 .get_ethtool_stats = nv_get_ethtool_stats,
b9f2c044 5238 .get_sset_count = nv_get_sset_count,
9589c77a 5239 .self_test = nv_self_test,
1da177e4
LT
5240};
5241
ee407b02
AA
5242static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5243{
5244 struct fe_priv *np = get_nvpriv(dev);
5245
5246 spin_lock_irq(&np->lock);
5247
5248 /* save vlan group */
5249 np->vlangrp = grp;
5250
5251 if (grp) {
5252 /* enable vlan on MAC */
5253 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5254 } else {
5255 /* disable vlan on MAC */
5256 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5257 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5258 }
5259
5260 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5261
5262 spin_unlock_irq(&np->lock);
25805dcf 5263}
ee407b02 5264
7e680c22
AA
5265/* The mgmt unit and driver use a semaphore to access the phy during init */
5266static int nv_mgmt_acquire_sema(struct net_device *dev)
5267{
cac1c52c 5268 struct fe_priv *np = netdev_priv(dev);
7e680c22
AA
5269 u8 __iomem *base = get_hwbase(dev);
5270 int i;
5271 u32 tx_ctrl, mgmt_sema;
5272
5273 for (i = 0; i < 10; i++) {
5274 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5275 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5276 break;
5277 msleep(500);
5278 }
5279
5280 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5281 return 0;
5282
5283 for (i = 0; i < 2; i++) {
5284 tx_ctrl = readl(base + NvRegTransmitterControl);
5285 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5286 writel(tx_ctrl, base + NvRegTransmitterControl);
5287
5288 /* verify that semaphore was acquired */
5289 tx_ctrl = readl(base + NvRegTransmitterControl);
5290 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
cac1c52c
AA
5291 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5292 np->mgmt_sema = 1;
7e680c22 5293 return 1;
cac1c52c 5294 }
7e680c22
AA
5295 else
5296 udelay(50);
5297 }
5298
5299 return 0;
5300}
5301
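/* Acquisition above polls up to ten times (500ms apart) for the management
 * unit to drop its semaphore, then sets NVREG_XMITCTL_HOST_SEMA_ACQ and
 * reads back to confirm ownership. nv_mgmt_release_sema() below simply
 * clears the host bit again.
 */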
cac1c52c
AA
5302static void nv_mgmt_release_sema(struct net_device *dev)
5303{
5304 struct fe_priv *np = netdev_priv(dev);
5305 u8 __iomem *base = get_hwbase(dev);
5306 u32 tx_ctrl;
5307
5308 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5309 if (np->mgmt_sema) {
5310 tx_ctrl = readl(base + NvRegTransmitterControl);
5311 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5312 writel(tx_ctrl, base + NvRegTransmitterControl);
5313 }
5314 }
5315}
5316
5317
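/* Version query handshake: toggle NVREG_XMITCTL_DATA_START after writing
 * the request and wait (up to 5s) for the mgmt unit to flip its DATA_READY
 * bit; on success the version is latched in NvRegMgmtUnitVersion.
 */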
5318static int nv_mgmt_get_version(struct net_device *dev)
5319{
5320 struct fe_priv *np = netdev_priv(dev);
5321 u8 __iomem *base = get_hwbase(dev);
5322 u32 data_ready = readl(base + NvRegTransmitterControl);
5323 u32 data_ready2 = 0;
5324 unsigned long start;
5325 int ready = 0;
5326
5327 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5328 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5329 start = jiffies;
5330 while (time_before(jiffies, start + 5*HZ)) {
5331 data_ready2 = readl(base + NvRegTransmitterControl);
5332 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5333 ready = 1;
5334 break;
5335 }
5336 schedule_timeout_uninterruptible(1);
5337 }
5338
5339 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5340 return 0;
5341
5342 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5343
5344 return 1;
5345}
5346
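/* Bring-up order in nv_open() matters: power up the phy, erase stale mac
 * and filter state, allocate and publish the rings, configure timers and
 * irq moderation, request the irq, and only then enable interrupts and
 * kick nv_update_linkspeed()/nv_start_rxtx(). A failed irq request unwinds
 * through out_drain.
 */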
1da177e4
LT
5347static int nv_open(struct net_device *dev)
5348{
ac9c1897 5349 struct fe_priv *np = netdev_priv(dev);
1da177e4 5350 u8 __iomem *base = get_hwbase(dev);
d33a73c8
AA
5351 int ret = 1;
5352 int oom, i;
a433686c 5353 u32 low;
1da177e4
LT
5354
5355 dprintk(KERN_DEBUG "nv_open: begin\n");
5356
cb52deba
ES
5357 /* power up phy */
5358 mii_rw(dev, np->phyaddr, MII_BMCR,
5359 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5360
88d7d8b0 5361 nv_txrx_gate(dev, false);
f1489653 5362 /* erase previous misconfiguration */
86a0f043
AA
5363 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5364 nv_mac_reset(dev);
1da177e4
LT
5365 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5366 writel(0, base + NvRegMulticastAddrB);
bb9a4fd1
AA
5367 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5368 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
1da177e4
LT
5369 writel(0, base + NvRegPacketFilterFlags);
5370
5371 writel(0, base + NvRegTransmitterControl);
5372 writel(0, base + NvRegReceiverControl);
5373
5374 writel(0, base + NvRegAdapterControl);
5375
eb91f61b
AA
5376 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5377 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5378
f1489653 5379 /* initialize descriptor rings */
d81c0983 5380 set_bufsize(dev);
1da177e4
LT
5381 oom = nv_init_ring(dev);
5382
5383 writel(0, base + NvRegLinkSpeed);
5070d340 5384 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
1da177e4
LT
5385 nv_txrx_reset(dev);
5386 writel(0, base + NvRegUnknownSetupReg6);
5387
5388 np->in_shutdown = 0;
5389
f1489653 5390 /* give hw rings */
0832b25a 5391 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
eafa59f6 5392	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
1da177e4
LT
5393 base + NvRegRingSizes);
5394
1da177e4 5395 writel(np->linkspeed, base + NvRegLinkSpeed);
95d161cb
AA
5396 if (np->desc_ver == DESC_VER_1)
5397 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5398 else
5399 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
8a4ae7f2 5400 writel(np->txrxctl_bits, base + NvRegTxRxControl);
ee407b02 5401 writel(np->vlanctl_bits, base + NvRegVlanControl);
1da177e4 5402 pci_push(base);
8a4ae7f2 5403 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
1da177e4
LT
5404 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5405 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5406 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5407
7e680c22 5408 writel(0, base + NvRegMIIMask);
1da177e4 5409 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
eb798428 5410 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
1da177e4 5411
1da177e4
LT
5412 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5413 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5414 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
d81c0983 5415 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1da177e4
LT
5416
5417 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
a433686c
AA
5418
5419 get_random_bytes(&low, sizeof(low));
5420 low &= NVREG_SLOTTIME_MASK;
5421 if (np->desc_ver == DESC_VER_1) {
5422 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5423 } else {
5424 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5425 /* setup legacy backoff */
5426 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5427 } else {
5428 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5429 nv_gear_backoff_reseed(dev);
5430 }
5431 }
9744e218
AA
5432 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5433 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
a971c324
AA
5434 if (poll_interval == -1) {
5435 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5436 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5437 else
5438 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5439 }
5440 else
5441 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
1da177e4
LT
5442 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5443 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5444 base + NvRegAdapterControl);
5445 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
7e680c22 5446 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
c42d9df9
AA
5447 if (np->wolenabled)
5448	writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
1da177e4
LT
5449
5450 i = readl(base + NvRegPowerState);
5451	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5452 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5453
5454 pci_push(base);
5455 udelay(10);
5456 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5457
84b3932b 5458 nv_disable_hw_interrupts(dev, np->irqmask);
1da177e4 5459 pci_push(base);
eb798428 5460 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
1da177e4
LT
5461 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5462 pci_push(base);
5463
9589c77a 5464 if (nv_request_irq(dev, 0)) {
84b3932b 5465 goto out_drain;
d33a73c8 5466 }
1da177e4
LT
5467
5468 /* ask for interrupts */
84b3932b 5469 nv_enable_hw_interrupts(dev, np->irqmask);
1da177e4
LT
5470
5471 spin_lock_irq(&np->lock);
5472 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5473 writel(0, base + NvRegMulticastAddrB);
bb9a4fd1
AA
5474 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5475 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
1da177e4
LT
5476 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5477 /* One manual link speed update: Interrupts are enabled, future link
5478 * speed changes cause interrupts and are handled by nv_link_irq().
5479 */
5480 {
5481 u32 miistat;
5482 miistat = readl(base + NvRegMIIStatus);
eb798428 5483 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
1da177e4
LT
5484 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5485 }
1b1b3c9b
MS
5486	/* set linkspeed to an invalid value, forcing nv_update_linkspeed
5487	 * to init the hw */
5488 np->linkspeed = 0;
1da177e4 5489 ret = nv_update_linkspeed(dev);
36b30ea9 5490 nv_start_rxtx(dev);
1da177e4 5491 netif_start_queue(dev);
08d93575 5492 nv_napi_enable(dev);
e27cdba5 5493
1da177e4
LT
5494 if (ret) {
5495 netif_carrier_on(dev);
5496 } else {
f7ab697d 5497 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
1da177e4
LT
5498 netif_carrier_off(dev);
5499 }
5500 if (oom)
5501 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
52da3578
AA
5502
5503 /* start statistics timer */
9c662435 5504 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
bfebbb88
DD
5505 mod_timer(&np->stats_poll,
5506 round_jiffies(jiffies + STATS_INTERVAL));
52da3578 5507
1da177e4
LT
5508 spin_unlock_irq(&np->lock);
5509
5510 return 0;
5511out_drain:
36b30ea9 5512 nv_drain_rxtx(dev);
1da177e4
LT
5513 return ret;
5514}
5515
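/* Teardown mirrors nv_open(): mark in_shutdown, quiesce napi and the irq,
 * kill the timers, stop and drain the engines, then either keep the
 * receiver alive for wake-on-lan or power the phy down and gate the
 * tx/rx clocks.
 */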
5516static int nv_close(struct net_device *dev)
5517{
ac9c1897 5518 struct fe_priv *np = netdev_priv(dev);
1da177e4
LT
5519 u8 __iomem *base;
5520
5521 spin_lock_irq(&np->lock);
5522 np->in_shutdown = 1;
5523 spin_unlock_irq(&np->lock);
08d93575 5524 nv_napi_disable(dev);
a7475906 5525 synchronize_irq(np->pci_dev->irq);
1da177e4
LT
5526
5527 del_timer_sync(&np->oom_kick);
5528 del_timer_sync(&np->nic_poll);
52da3578 5529 del_timer_sync(&np->stats_poll);
1da177e4
LT
5530
5531 netif_stop_queue(dev);
5532 spin_lock_irq(&np->lock);
36b30ea9 5533 nv_stop_rxtx(dev);
1da177e4
LT
5534 nv_txrx_reset(dev);
5535
5536 /* disable interrupts on the nic or we will lock up */
5537 base = get_hwbase(dev);
84b3932b 5538 nv_disable_hw_interrupts(dev, np->irqmask);
1da177e4
LT
5539 pci_push(base);
5540 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5541
5542 spin_unlock_irq(&np->lock);
5543
84b3932b 5544 nv_free_irq(dev);
1da177e4 5545
36b30ea9 5546 nv_drain_rxtx(dev);
1da177e4 5547
5a9a8e32 5548 if (np->wolenabled || !phy_power_down) {
88d7d8b0 5549 nv_txrx_gate(dev, false);
2cc49a5c 5550 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
1da177e4 5551 nv_start_rx(dev);
cb52deba
ES
5552 } else {
5553 /* power down phy */
5554 mii_rw(dev, np->phyaddr, MII_BMCR,
5555 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
88d7d8b0 5556 nv_txrx_gate(dev, true);
2cc49a5c 5557 }
1da177e4
LT
5558
5559 /* FIXME: power down nic */
5560
5561 return 0;
5562}
5563
b94426bd
SH
5564static const struct net_device_ops nv_netdev_ops = {
5565 .ndo_open = nv_open,
5566 .ndo_stop = nv_close,
5567 .ndo_get_stats = nv_get_stats,
00829823
SH
5568 .ndo_start_xmit = nv_start_xmit,
5569 .ndo_tx_timeout = nv_tx_timeout,
5570 .ndo_change_mtu = nv_change_mtu,
5571 .ndo_validate_addr = eth_validate_addr,
5572 .ndo_set_mac_address = nv_set_mac_address,
5573 .ndo_set_multicast_list = nv_set_multicast,
5574 .ndo_vlan_rx_register = nv_vlan_rx_register,
5575#ifdef CONFIG_NET_POLL_CONTROLLER
5576 .ndo_poll_controller = nv_poll_controller,
5577#endif
5578};
5579
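/* A second ops table that differs only in .ndo_start_xmit: the optimized
 * variant drives the extended (packet format 3) descriptors. nv_probe()
 * selects one or the other based on nv_optimized(np).
 */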
5580static const struct net_device_ops nv_netdev_ops_optimized = {
5581 .ndo_open = nv_open,
5582 .ndo_stop = nv_close,
5583 .ndo_get_stats = nv_get_stats,
5584 .ndo_start_xmit = nv_start_xmit_optimized,
b94426bd
SH
5585 .ndo_tx_timeout = nv_tx_timeout,
5586 .ndo_change_mtu = nv_change_mtu,
5587 .ndo_validate_addr = eth_validate_addr,
5588 .ndo_set_mac_address = nv_set_mac_address,
5589 .ndo_set_multicast_list = nv_set_multicast,
5590 .ndo_vlan_rx_register = nv_vlan_rx_register,
5591#ifdef CONFIG_NET_POLL_CONTROLLER
5592 .ndo_poll_controller = nv_poll_controller,
5593#endif
5594};
5595
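/* Probe order: enable and map the pci device, size the register window by
 * feature flags, pick a descriptor version and dma mask, allocate the
 * rings, recover the (possibly byte-reversed) mac address, take phy and
 * nic out of low power, negotiate with the management unit if present,
 * scan the 32 possible mii addresses for a phy, and finally
 * register_netdev().
 */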
1da177e4
LT
5596static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5597{
5598 struct net_device *dev;
5599 struct fe_priv *np;
5600 unsigned long addr;
5601 u8 __iomem *base;
5602 int err, i;
5070d340 5603 u32 powerstate, txreg;
7e680c22
AA
5604 u32 phystate_orig = 0, phystate;
5605 int phyinitialized = 0;
3f88ce49
JG
5606 static int printed_version;
5607
5608 if (!printed_version++)
5609 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5610 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
1da177e4
LT
5611
5612 dev = alloc_etherdev(sizeof(struct fe_priv));
5613 err = -ENOMEM;
5614 if (!dev)
5615 goto out;
5616
ac9c1897 5617 np = netdev_priv(dev);
bea3348e 5618 np->dev = dev;
1da177e4
LT
5619 np->pci_dev = pci_dev;
5620 spin_lock_init(&np->lock);
1da177e4
LT
5621 SET_NETDEV_DEV(dev, &pci_dev->dev);
5622
5623 init_timer(&np->oom_kick);
5624 np->oom_kick.data = (unsigned long) dev;
5625 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
5626 init_timer(&np->nic_poll);
5627 np->nic_poll.data = (unsigned long) dev;
5628 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
52da3578
AA
5629 init_timer(&np->stats_poll);
5630 np->stats_poll.data = (unsigned long) dev;
5631 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
1da177e4
LT
5632
5633 err = pci_enable_device(pci_dev);
3f88ce49 5634 if (err)
1da177e4 5635 goto out_free;
1da177e4
LT
5636
5637 pci_set_master(pci_dev);
5638
5639 err = pci_request_regions(pci_dev, DRV_NAME);
5640 if (err < 0)
5641 goto out_disable;
5642
9c662435 5643 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
57fff698
AA
5644 np->register_size = NV_PCI_REGSZ_VER3;
5645 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
86a0f043
AA
5646 np->register_size = NV_PCI_REGSZ_VER2;
5647 else
5648 np->register_size = NV_PCI_REGSZ_VER1;
5649
1da177e4
LT
5650 err = -EINVAL;
5651 addr = 0;
5652 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5653 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5654 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5655 pci_resource_len(pci_dev, i),
5656 pci_resource_flags(pci_dev, i));
5657 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
86a0f043 5658 pci_resource_len(pci_dev, i) >= np->register_size) {
1da177e4
LT
5659 addr = pci_resource_start(pci_dev, i);
5660 break;
5661 }
5662 }
5663 if (i == DEVICE_COUNT_RESOURCE) {
3f88ce49
JG
5664 dev_printk(KERN_INFO, &pci_dev->dev,
5665 "Couldn't find register window\n");
1da177e4
LT
5666 goto out_relreg;
5667 }
5668
86a0f043
AA
5669 /* copy of driver data */
5670 np->driver_data = id->driver_data;
9f3f7910
AA
5671 /* copy of device id */
5672 np->device_id = id->device;
86a0f043 5673
1da177e4 5674 /* handle different descriptor versions */
ee73362c
MS
5675 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5676 /* packet format 3: supports 40-bit addressing */
5677 np->desc_ver = DESC_VER_3;
84b3932b 5678 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
69fe3fd7 5679 if (dma_64bit) {
6afd142f 5680 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
3f88ce49
JG
5681 dev_printk(KERN_INFO, &pci_dev->dev,
5682 "64-bit DMA failed, using 32-bit addressing\n");
5683 else
69fe3fd7 5684 dev->features |= NETIF_F_HIGHDMA;
6afd142f 5685 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
3f88ce49
JG
5686 dev_printk(KERN_INFO, &pci_dev->dev,
5687 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
69fe3fd7 5688 }
ee73362c
MS
5689 }
5690 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5691 /* packet format 2: supports jumbo frames */
1da177e4 5692 np->desc_ver = DESC_VER_2;
8a4ae7f2 5693 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
ee73362c
MS
5694 } else {
5695 /* original packet format */
5696 np->desc_ver = DESC_VER_1;
8a4ae7f2 5697 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
d81c0983 5698 }
ee73362c
MS
5699
5700 np->pkt_limit = NV_PKTLIMIT_1;
5701 if (id->driver_data & DEV_HAS_LARGEDESC)
5702 np->pkt_limit = NV_PKTLIMIT_2;
5703
8a4ae7f2 5704 if (id->driver_data & DEV_HAS_CHECKSUM) {
f2ad2d9b 5705 np->rx_csum = 1;
8a4ae7f2 5706 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
edcfe5f7 5707 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
fa45459e 5708 dev->features |= NETIF_F_TSO;
21828163 5709 }
8a4ae7f2 5710
ee407b02
AA
5711 np->vlanctl_bits = 0;
5712 if (id->driver_data & DEV_HAS_VLAN) {
5713 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5714 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
ee407b02
AA
5715 }
5716
b6d0773f 5717 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5289b4c4
AA
5718 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5719 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5720 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
b6d0773f 5721 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
eb91f61b 5722 }
f3b197ac 5723
eb91f61b 5724
1da177e4 5725 err = -ENOMEM;
86a0f043 5726 np->base = ioremap(addr, np->register_size);
1da177e4
LT
5727 if (!np->base)
5728 goto out_relreg;
5729 dev->base_addr = (unsigned long)np->base;
ee73362c 5730
1da177e4 5731 dev->irq = pci_dev->irq;
ee73362c 5732
eafa59f6
AA
5733 np->rx_ring_size = RX_RING_DEFAULT;
5734 np->tx_ring_size = TX_RING_DEFAULT;
eafa59f6 5735
36b30ea9 5736 if (!nv_optimized(np)) {
ee73362c 5737 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
eafa59f6 5738 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
ee73362c
MS
5739 &np->ring_addr);
5740 if (!np->rx_ring.orig)
5741 goto out_unmap;
eafa59f6 5742 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
ee73362c
MS
5743 } else {
5744 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
eafa59f6 5745 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
ee73362c
MS
5746 &np->ring_addr);
5747 if (!np->rx_ring.ex)
5748 goto out_unmap;
eafa59f6
AA
5749 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5750 }
dd00cc48
YP
5751 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5752 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
761fcd9e 5753 if (!np->rx_skb || !np->tx_skb)
eafa59f6 5754 goto out_freering;
1da177e4 5755
36b30ea9 5756 if (!nv_optimized(np))
00829823 5757 dev->netdev_ops = &nv_netdev_ops;
86b22b0d 5758 else
00829823 5759 dev->netdev_ops = &nv_netdev_ops_optimized;
b94426bd 5760
e27cdba5 5761#ifdef CONFIG_FORCEDETH_NAPI
bea3348e 5762 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
2918c35d 5763#endif
1da177e4 5764 SET_ETHTOOL_OPS(dev, &ops);
1da177e4
LT
5765 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5766
5767 pci_set_drvdata(pci_dev, dev);
5768
5769 /* read the mac address */
5770 base = get_hwbase(dev);
5771 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5772 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5773
5070d340
AA
5774 /* check the workaround bit for correct mac address order */
5775 txreg = readl(base + NvRegTransmitPoll);
a376e79c 5776 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5070d340
AA
5777 /* mac address is already in correct order */
5778 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5779 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5780 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5781 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5782 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5783 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
a376e79c
AA
5784 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5785 /* mac address is already in correct order */
5786 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5787 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5788 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5789 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5790 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5791 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5792 /*
5793 * Set orig mac address back to the reversed version.
5794 * This flag will be cleared during low power transition.
5795 * Therefore, we should always put back the reversed address.
5796 */
5797 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5798 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5799 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5070d340
AA
5800 } else {
5801 /* need to reverse mac address to correct order */
5802 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5803 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5804 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5805 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5806 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5807 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5070d340 5808 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
f55c21fd 5809 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
5070d340 5810 }
c704b856 5811 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4 5812
c704b856 5813 if (!is_valid_ether_addr(dev->perm_addr)) {
1da177e4
LT
5814 /*
5815 * Bad mac address. At least one bios sets the mac address
5816 * to 01:23:45:67:89:ab
5817 */
3f88ce49 5818 dev_printk(KERN_ERR, &pci_dev->dev,
e174961c
JB
5819 "Invalid Mac address detected: %pM\n",
5820 dev->dev_addr);
3f88ce49
JG
5821 dev_printk(KERN_ERR, &pci_dev->dev,
5822 "Please complain to your hardware vendor. Switching to a random MAC.\n");
1da177e4
LT
5823 dev->dev_addr[0] = 0x00;
5824 dev->dev_addr[1] = 0x00;
5825 dev->dev_addr[2] = 0x6c;
5826 get_random_bytes(&dev->dev_addr[3], 3);
5827 }
5828
e174961c
JB
5829 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5830 pci_name(pci_dev), dev->dev_addr);
1da177e4 5831
f1489653
AA
5832 /* set mac address */
5833 nv_copy_mac_to_hw(dev);
5834
9a60a826
TD
5835 /* Workaround current PCI init glitch: wakeup bits aren't
5836 * being set from PCI PM capability.
5837 */
5838 device_init_wakeup(&pci_dev->dev, 1);
5839
1da177e4
LT
5840 /* disable WOL */
5841 writel(0, base + NvRegWakeUpFlags);
5842 np->wolenabled = 0;
5843
86a0f043 5844 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
86a0f043
AA
5845
5846 /* take phy and nic out of low power mode */
5847 powerstate = readl(base + NvRegPowerState2);
5848 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
3c2e1c11 5849 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
44c10138 5850 pci_dev->revision >= 0xA3)
86a0f043
AA
5851 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5852 writel(powerstate, base + NvRegPowerState2);
5853 }
5854
1da177e4 5855 if (np->desc_ver == DESC_VER_1) {
ac9c1897 5856 np->tx_flags = NV_TX_VALID;
1da177e4 5857 } else {
ac9c1897 5858 np->tx_flags = NV_TX2_VALID;
1da177e4 5859 }
9e184767
AA
5860
5861 np->msi_flags = 0;
5862 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5863 np->msi_flags |= NV_MSI_CAPABLE;
5864 }
5865 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5866	/* msi-x has had reported issues when the irqmask is modified,
5867	   as happens in the napi path; therefore, disable it for now
5868	 */
5869#ifndef CONFIG_FORCEDETH_NAPI
5870 np->msi_flags |= NV_MSI_X_CAPABLE;
5871#endif
5872 }
5873
5874 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
a971c324 5875 np->irqmask = NVREG_IRQMASK_CPU;
d33a73c8
AA
5876 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5877 np->msi_flags |= 0x0001;
9e184767
AA
5878 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5879 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5880 /* start off in throughput mode */
5881 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5882 /* remove support for msix mode */
5883 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5884 } else {
5885 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5886 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5887 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5888 np->msi_flags |= 0x0003;
d33a73c8 5889 }
a971c324 5890
1da177e4
LT
5891 if (id->driver_data & DEV_NEED_TIMERIRQ)
5892 np->irqmask |= NVREG_IRQ_TIMER;
5893 if (id->driver_data & DEV_NEED_LINKTIMER) {
5894 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5895 np->need_linktimer = 1;
5896 np->link_timeout = jiffies + LINK_TIMEOUT;
5897 } else {
5898 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5899 np->need_linktimer = 0;
5900 }
5901
3b446c3e
AA
5902	/* Limit the number of outstanding tx's to work around a hw bug */
5903 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5904 np->tx_limit = 1;
3c2e1c11 5905 if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
3b446c3e
AA
5906 pci_dev->revision >= 0xA2)
5907 np->tx_limit = 0;
5908 }
5909
7e680c22
AA
5910 /* clear phy state and temporarily halt phy interrupts */
5911 writel(0, base + NvRegMIIMask);
5912 phystate = readl(base + NvRegAdapterControl);
5913 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5914 phystate_orig = 1;
5915 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5916 writel(phystate, base + NvRegAdapterControl);
5917 }
eb798428 5918 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
7e680c22
AA
5919
5920 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
7e680c22 5921 /* management unit running on the mac? */
cac1c52c
AA
5922 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5923 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5924 nv_mgmt_acquire_sema(dev) &&
5925 nv_mgmt_get_version(dev)) {
5926 np->mac_in_use = 1;
5927 if (np->mgmt_version > 0) {
5928 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5929 }
5930 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5931 pci_name(pci_dev), np->mac_in_use);
5932 /* management unit setup the phy already? */
5933 if (np->mac_in_use &&
5934 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5935 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5936 /* phy is inited by mgmt unit */
5937 phyinitialized = 1;
5938 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5939 pci_name(pci_dev));
5940 } else {
5941 /* we need to init the phy */
7e680c22
AA
5942 }
5943 }
5944 }
5945
1da177e4 5946 /* find a suitable phy */
7a33e45a 5947 for (i = 1; i <= 32; i++) {
1da177e4 5948 int id1, id2;
7a33e45a 5949 int phyaddr = i & 0x1F;
1da177e4
LT
5950
5951 spin_lock_irq(&np->lock);
7a33e45a 5952 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
1da177e4
LT
5953 spin_unlock_irq(&np->lock);
5954 if (id1 < 0 || id1 == 0xffff)
5955 continue;
5956 spin_lock_irq(&np->lock);
7a33e45a 5957 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
1da177e4
LT
5958 spin_unlock_irq(&np->lock);
5959 if (id2 < 0 || id2 == 0xffff)
5960 continue;
5961
edf7e5ec 5962 np->phy_model = id2 & PHYID2_MODEL_MASK;
1da177e4
LT
5963 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5964 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5965 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
7a33e45a
AA
5966 pci_name(pci_dev), id1, id2, phyaddr);
5967 np->phyaddr = phyaddr;
1da177e4 5968 np->phy_oui = id1 | id2;
9f3f7910
AA
5969
5970 /* Realtek hardcoded phy id1 to all zero's on certain phys */
5971 if (np->phy_oui == PHY_OUI_REALTEK2)
5972 np->phy_oui = PHY_OUI_REALTEK;
5973 /* Setup phy revision for Realtek */
5974 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5975 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5976
1da177e4
LT
5977 break;
5978 }
7a33e45a 5979 if (i == 33) {
3f88ce49
JG
5980 dev_printk(KERN_INFO, &pci_dev->dev,
5981 "open: Could not find a valid PHY.\n");
eafa59f6 5982 goto out_error;
1da177e4 5983 }
f3b197ac 5984
7e680c22
AA
5985 if (!phyinitialized) {
5986 /* reset it */
5987 phy_init(dev);
f35723ec
AA
5988 } else {
5989 /* see if it is a gigabit phy */
5990 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5991 if (mii_status & PHY_GIGABIT) {
5992 np->gigabit = PHY_GIGABIT;
5993 }
7e680c22 5994 }
1da177e4
LT
5995
5996 /* set default link speed settings */
5997 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5998 np->duplex = 0;
5999 np->autoneg = 1;
6000
6001 err = register_netdev(dev);
6002 if (err) {
3f88ce49
JG
6003 dev_printk(KERN_INFO, &pci_dev->dev,
6004 "unable to register netdev: %d\n", err);
eafa59f6 6005 goto out_error;
1da177e4 6006 }
3f88ce49
JG
6007
6008 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
6009 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
6010 dev->name,
6011 np->phy_oui,
6012 np->phyaddr,
6013 dev->dev_addr[0],
6014 dev->dev_addr[1],
6015 dev->dev_addr[2],
6016 dev->dev_addr[3],
6017 dev->dev_addr[4],
6018 dev->dev_addr[5]);
6019
6020 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6021 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
edcfe5f7 6022 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
3f88ce49
JG
6023 "csum " : "",
6024 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
6025 "vlan " : "",
6026 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6027 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6028 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6029 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6030 np->need_linktimer ? "lnktim " : "",
6031 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6032 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6033 np->desc_ver);
1da177e4
LT
6034
6035 return 0;
6036
eafa59f6 6037out_error:
7e680c22
AA
6038 if (phystate_orig)
6039 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
1da177e4 6040 pci_set_drvdata(pci_dev, NULL);
eafa59f6
AA
6041out_freering:
6042 free_rings(dev);
1da177e4
LT
6043out_unmap:
6044 iounmap(get_hwbase(dev));
6045out_relreg:
6046 pci_release_regions(pci_dev);
6047out_disable:
6048 pci_disable_device(pci_dev);
6049out_free:
6050 free_netdev(dev);
6051out:
6052 return err;
6053}
6054
9f3f7910
AA
6055static void nv_restore_phy(struct net_device *dev)
6056{
6057 struct fe_priv *np = netdev_priv(dev);
6058 u16 phy_reserved, mii_control;
6059
6060 if (np->phy_oui == PHY_OUI_REALTEK &&
6061 np->phy_model == PHY_MODEL_REALTEK_8201 &&
6062 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6063 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6064 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6065 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6066 phy_reserved |= PHY_REALTEK_INIT8;
6067 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6068 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6069
6070 /* restart auto negotiation */
6071 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6072 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6073 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6074 }
6075}
6076
f55c21fd 6077static void nv_restore_mac_addr(struct pci_dev *pci_dev)
1da177e4
LT
6078{
6079 struct net_device *dev = pci_get_drvdata(pci_dev);
f1489653
AA
6080 struct fe_priv *np = netdev_priv(dev);
6081 u8 __iomem *base = get_hwbase(dev);
1da177e4 6082
f1489653
AA
6083 /* special op: write back the misordered MAC address - otherwise
6084 * the next nv_probe would see a wrong address.
6085 */
6086 writel(np->orig_mac[0], base + NvRegMacAddrA);
6087 writel(np->orig_mac[1], base + NvRegMacAddrB);