/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
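/*
 * Illustrative sketch of how such a per-device flag is consumed; this is
 * assumed to mirror the probe path (nv_probe() is outside this excerpt):
 *
 *	if (id->driver_data & DEV_NEED_TIMERIRQ)
 *		np->irqmask |= NVREG_IRQ_TIMER;
 */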
#define FORCEDETH_VERSION		"0.62"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x000040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3	0x000800  /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED	0x001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x080000  /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE	0x100000  /* device supports gear mode */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
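/*
 * Decoded: NVREG_IRQMASK_THROUGHPUT (0x00df) equals NVREG_IRQ_RX_ALL |
 * NVREG_IRQ_TX_ERR | NVREG_IRQ_TX_OK | NVREG_IRQ_LINK, i.e. an interrupt
 * per rx/tx event; NVREG_IRQMASK_CPU (0x0060) equals NVREG_IRQ_TIMER |
 * NVREG_IRQ_LINK, i.e. rx/tx work is batched behind the timer tick.
 */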
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
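/*
 * Sketch (illustrative, assuming the usual ring-setup path) of how both
 * ring sizes end up packed into NvRegRingSizes:
 *
 *	writel(((np->rx_ring_size - 1) << NVREG_RINGSZ_RXSHIFT) |
 *	       ((np->tx_ring_size - 1) << NVREG_RINGSZ_TXSHIFT),
 *	       base + NvRegRingSizes);
 */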
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
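/*
 * A descriptor's flaglen word packs the buffer length into its low bits
 * and the NV_TX/NV_RX status flags into its high bits; LEN_MASK_V1 and
 * LEN_MASK_V2 extract the length, as nv_descr_getlength() does below.
 */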
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)
#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
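/*
 * The checksum result is a small field within the descriptor flags (mask
 * 0x1C000000); a sketch of how the rx path is assumed to consume it
 * (the actual rx processing is outside this excerpt):
 *
 *	if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP ||
 *	    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */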
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVIDIA: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK		0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
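/*
 * Sketch (illustrative, assumed to mirror the probe code outside this
 * excerpt) of how the standard PHY id registers map onto the masks and
 * shifts above to derive OUI, model and revision:
 *
 *	phy_oui   = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
 *	phy_oui  |= (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
 *	phy_model = id2 & PHYID2_MODEL_MASK;
 *	phy_rev   = id2 & PHY_REV_MASK;
 */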
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2
#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2

#define NV_TX_LIMIT_COUNT	16

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
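/*
 * Worked out from the struct above: NV_DEV_STATISTICS_V3_COUNT is the
 * total number of u64 counters; subtracting the 3 version-3 counters
 * (tx_unicast, tx_multicast, tx_broadcast) gives the V2 count, and
 * subtracting the 6 version-2 counters (tx_deferral, tx_packets,
 * rx_bytes, tx_pause, rx_pause, rx_drop_frame) on top of that gives
 * the V1 count.
 */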
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline)    " },
	{ "interrupt (offline)   " },
	{ "loopback (offline)    " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;
	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx    */
	char name_tx[IFNAMSIZ + 3];	/* -tx    */
	char name_other[IFNAMSIZ + 6];	/* -other */
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 15;
/*
 * Optimization can be either throughput mode or CPU mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, Max = 65535
 */
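/*
 * Worked example of the formula above: a 1 ms period gives
 * (1000 * 100) / 2^10 ~= 97, matching the NVREG_POLL_DEFAULT comment
 * next to NvRegPollingInterval; the ~10 ms backup tx-cleanup tick used
 * in throughput mode gives (10000 * 100) / 1024 ~= 970
 * (NVREG_POLL_DEFAULT_THROUGHPUT).
 */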
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
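/*
 * The double shift in dma_high() is deliberate: when dma_addr_t is a
 * 32-bit type, "addr >> 32" would be undefined behaviour in C (shift
 * count equal to the type width), whereas ">>31>>1" is well defined and
 * yields 0 on 32-bit and the upper word on 64-bit configurations.
 */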
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
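/*
 * Multiple interrupt vectors are only in use when MSI-X is enabled with
 * more than one vector (the NV_MSI_X_VECTORS_MASK field); with legacy
 * irqs, MSI, or a single MSI-X vector, all events share one handler.
 */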
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSI-X mode, a write to irqmask behaves as XOR */
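/* Consequently, nv_disable_hw_interrupts() below writes the same mask
 * again to toggle the enabled bits off under MSI-X, and writes 0 in the
 * MSI/legacy case. */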
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map *tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
1902 static void nv_drain_tx(struct net_device *dev)
1904 struct fe_priv *np = netdev_priv(dev);
1907 for (i = 0; i < np->tx_ring_size; i++) {
1908 if (!nv_optimized(np)) {
1909 np->tx_ring.orig[i].flaglen = 0;
1910 np->tx_ring.orig[i].buf = 0;
1912 np->tx_ring.ex[i].flaglen = 0;
1913 np->tx_ring.ex[i].txvlan = 0;
1914 np->tx_ring.ex[i].bufhigh = 0;
1915 np->tx_ring.ex[i].buflow = 0;
1917 if (nv_release_txskb(dev, &np->tx_skb[i]))
1918 dev->stats.tx_dropped++;
1919 np->tx_skb[i].dma = 0;
1920 np->tx_skb[i].dma_len = 0;
1921 np->tx_skb[i].first_tx_desc = NULL;
1922 np->tx_skb[i].next_tx_ctx = NULL;
1924 np->tx_pkts_in_progress = 0;
1925 np->tx_change_owner = NULL;
1926 np->tx_end_flip = NULL;
1929 static void nv_drain_rx(struct net_device *dev)
1931 struct fe_priv *np = netdev_priv(dev);
1934 for (i = 0; i < np->rx_ring_size; i++) {
1935 if (!nv_optimized(np)) {
1936 np->rx_ring.orig[i].flaglen = 0;
1937 np->rx_ring.orig[i].buf = 0;
1939 np->rx_ring.ex[i].flaglen = 0;
1940 np->rx_ring.ex[i].txvlan = 0;
1941 np->rx_ring.ex[i].bufhigh = 0;
1942 np->rx_ring.ex[i].buflow = 0;
1945 if (np->rx_skb[i].skb) {
1946 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1947 (skb_end_pointer(np->rx_skb[i].skb) -
1948 np->rx_skb[i].skb->data),
1949 PCI_DMA_FROMDEVICE);
1950 dev_kfree_skb(np->rx_skb[i].skb);
1951 np->rx_skb[i].skb = NULL;
1956 static void nv_drain_rxtx(struct net_device *dev)
1962 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1964 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
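/*
 * Illustrative sketch, not driver code: the expression above counts the
 * in-flight descriptors as the put/get context-pointer distance wrapped
 * into [0, tx_ring_size) and subtracts that from the ring size. With
 * plain integer indices the same calculation reads:
 */
#if 0
static u32 empty_slots_example(u32 ring_size, u32 put_idx, u32 get_idx)
{
	/* descriptors currently in flight, wrapped into [0, ring_size) */
	u32 used = (ring_size + put_idx - get_idx) % ring_size;
	/* e.g. ring_size 16, put_idx 3, get_idx 14 -> used 5, 11 free */
	return ring_size - used;
}
#endif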
1967 static void nv_legacybackoff_reseed(struct net_device *dev)
1969 u8 __iomem *base = get_hwbase(dev);
1974 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1975 get_random_bytes(&low, sizeof(low));
1976 reg |= low & NVREG_SLOTTIME_MASK;
1978 /* Need to stop tx before change takes effect.
1979 * Caller has already acquired np->lock.
1981 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1985 writel(reg, base + NvRegSlotTime);
1991 /* Gear Backoff Seeds */
1992 #define BACKOFF_SEEDSET_ROWS 8
1993 #define BACKOFF_SEEDSET_LFSRS 15
1995 /* Known Good seed sets */
1996 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1997 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1998 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
1999 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2000 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2001 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2002 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2003 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2004 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
2006 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2007 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2008 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2009 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2010 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2011 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2012 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2013 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2014 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
2016 static void nv_gear_backoff_reseed(struct net_device *dev)
2018 u8 __iomem *base = get_hwbase(dev);
2019 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2020 u32 temp, seedset, combinedSeed;
2023 /* Set up the seed for the free-running LFSR */
2024 /* We gather three random 12-bit values and swizzle bits
2025 around to increase randomness */
2026 get_random_bytes(&miniseed1, sizeof(miniseed1));
2027 miniseed1 &= 0x0fff;
2031 get_random_bytes(&miniseed2, sizeof(miniseed2));
2032 miniseed2 &= 0x0fff;
2035 miniseed2_reversed =
2036 ((miniseed2 & 0xF00) >> 8) |
2037 (miniseed2 & 0x0F0) |
2038 ((miniseed2 & 0x00F) << 8);
2040 get_random_bytes(&miniseed3, sizeof(miniseed3));
2041 miniseed3 &= 0x0fff;
2044 miniseed3_reversed =
2045 ((miniseed3 & 0xF00) >> 8) |
2046 (miniseed3 & 0x0F0) |
2047 ((miniseed3 & 0x00F) << 8);
2049 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2050 (miniseed2 ^ miniseed3_reversed);
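/*
 * Worked example (illustration only): the "reversal" swaps the top and
 * bottom nibbles of a 12-bit value while keeping the middle nibble in
 * place, e.g. 0xABC becomes 0xCBA. With miniseed1 = 0x123,
 * miniseed2 = 0xABC and miniseed3 = 0x456:
 *   miniseed2_reversed = 0xCBA, miniseed3_reversed = 0x654
 *   combinedSeed = ((0x123 ^ 0xCBA) << 12) | (0xABC ^ 0x654)
 *                = (0xD99 << 12) | 0xCE8 = 0xD99CE8
 */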
2052 /* Seeds cannot be zero */
2053 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2054 combinedSeed |= 0x08;
2055 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2056 combinedSeed |= 0x8000;
2058 /* No need to disable tx here */
2059 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2060 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2061 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2062 writel(temp, base + NvRegBackOffControl);
2064 /* Setup seeds for all gear LFSRs. */
2065 get_random_bytes(&seedset, sizeof(seedset));
2066 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2067 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
2069 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2070 temp |= main_seedset[seedset][i-1] & 0x3ff;
2071 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2072 writel(temp, base + NvRegBackOffControl);
2077 * nv_start_xmit: dev->hard_start_xmit function
2078 * Called with netif_tx_lock held.
2080 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2082 struct fe_priv *np = netdev_priv(dev);
2084 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2085 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2089 u32 size = skb->len-skb->data_len;
2090 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2092 struct ring_desc* put_tx;
2093 struct ring_desc* start_tx;
2094 struct ring_desc* prev_tx;
2095 struct nv_skb_map* prev_tx_ctx;
2096 unsigned long flags;
2098 /* add fragments to entries count */
2099 for (i = 0; i < fragments; i++) {
2100 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2101 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
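/*
 * The two-term expression used above is a ceiling division: a buffer of
 * 'size' bytes needs size / NV_TX2_TSO_MAX_SIZE descriptors, rounded up,
 * since one descriptor carries at most NV_TX2_TSO_MAX_SIZE bytes.
 * Equivalent sketch (illustration only):
 */
#if 0
static u32 descs_for_buffer(u32 size)
{
	/* same result as (size >> NV_TX2_TSO_MAX_SHIFT) +
	 * ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0) */
	return (size + NV_TX2_TSO_MAX_SIZE - 1) / NV_TX2_TSO_MAX_SIZE;
}
#endif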
2104 spin_lock_irqsave(&np->lock, flags);
2105 empty_slots = nv_get_empty_tx_slots(np);
2106 if (unlikely(empty_slots <= entries)) {
2107 netif_stop_queue(dev);
2109 spin_unlock_irqrestore(&np->lock, flags);
2110 return NETDEV_TX_BUSY;
2112 spin_unlock_irqrestore(&np->lock, flags);
2114 start_tx = put_tx = np->put_tx.orig;
2116 /* setup the header buffer */
2119 prev_tx_ctx = np->put_tx_ctx;
2120 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2121 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2123 np->put_tx_ctx->dma_len = bcnt;
2124 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2125 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2127 tx_flags = np->tx_flags;
2130 if (unlikely(put_tx++ == np->last_tx.orig))
2131 put_tx = np->first_tx.orig;
2132 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2133 np->put_tx_ctx = np->first_tx_ctx;
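/*
 * Note on the wrap idiom used throughout this file: the pointer is
 * post-incremented, and if it sat on the last element before the
 * increment it is reset to the first, turning the flat descriptor
 * array into a circular buffer. Index-based sketch (illustration only):
 */
#if 0
static unsigned int ring_advance(unsigned int idx, unsigned int last)
{
	/* mirrors: if (unlikely(ptr++ == last_ptr)) ptr = first_ptr; */
	return (idx == last) ? 0 : idx + 1;
}
#endif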
2136 /* setup the fragments */
2137 for (i = 0; i < fragments; i++) {
2138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2139 u32 size = frag->size;
2144 prev_tx_ctx = np->put_tx_ctx;
2145 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2146 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2148 np->put_tx_ctx->dma_len = bcnt;
2149 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2150 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2154 if (unlikely(put_tx++ == np->last_tx.orig))
2155 put_tx = np->first_tx.orig;
2156 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2157 np->put_tx_ctx = np->first_tx_ctx;
2161 /* set last fragment flag */
2162 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2164 /* save skb in this slot's context area */
2165 prev_tx_ctx->skb = skb;
2167 if (skb_is_gso(skb))
2168 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2170 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2171 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2173 spin_lock_irqsave(&np->lock, flags);
2176 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2177 np->put_tx.orig = put_tx;
2179 spin_unlock_irqrestore(&np->lock, flags);
2181 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2182 dev->name, entries, tx_flags_extra);
2185 for (j = 0; j < 64; j++) {
2187 dprintk("\n%03x:", j);
2188 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2193 dev->trans_start = jiffies;
2194 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2195 return NETDEV_TX_OK;
2198 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2200 struct fe_priv *np = netdev_priv(dev);
2203 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2207 u32 size = skb->len-skb->data_len;
2208 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2210 struct ring_desc_ex* put_tx;
2211 struct ring_desc_ex* start_tx;
2212 struct ring_desc_ex* prev_tx;
2213 struct nv_skb_map* prev_tx_ctx;
2214 struct nv_skb_map* start_tx_ctx;
2215 unsigned long flags;
2217 /* add fragments to entries count */
2218 for (i = 0; i < fragments; i++) {
2219 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2220 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2223 spin_lock_irqsave(&np->lock, flags);
2224 empty_slots = nv_get_empty_tx_slots(np);
2225 if (unlikely(empty_slots <= entries)) {
2226 netif_stop_queue(dev);
2228 spin_unlock_irqrestore(&np->lock, flags);
2229 return NETDEV_TX_BUSY;
2231 spin_unlock_irqrestore(&np->lock, flags);
2233 start_tx = put_tx = np->put_tx.ex;
2234 start_tx_ctx = np->put_tx_ctx;
2236 /* setup the header buffer */
2239 prev_tx_ctx = np->put_tx_ctx;
2240 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2241 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2243 np->put_tx_ctx->dma_len = bcnt;
2244 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2245 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2246 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2248 tx_flags = NV_TX2_VALID;
2251 if (unlikely(put_tx++ == np->last_tx.ex))
2252 put_tx = np->first_tx.ex;
2253 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2254 np->put_tx_ctx = np->first_tx_ctx;
2257 /* setup the fragments */
2258 for (i = 0; i < fragments; i++) {
2259 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2260 u32 size = frag->size;
2265 prev_tx_ctx = np->put_tx_ctx;
2266 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2267 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2269 np->put_tx_ctx->dma_len = bcnt;
2270 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2271 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2272 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2276 if (unlikely(put_tx++ == np->last_tx.ex))
2277 put_tx = np->first_tx.ex;
2278 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2279 np->put_tx_ctx = np->first_tx_ctx;
2283 /* set last fragment flag */
2284 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2286 /* save skb in this slot's context area */
2287 prev_tx_ctx->skb = skb;
2289 if (skb_is_gso(skb))
2290 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2292 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2293 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2296 if (likely(!np->vlangrp)) {
2297 start_tx->txvlan = 0;
2299 if (vlan_tx_tag_present(skb))
2300 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2302 start_tx->txvlan = 0;
2305 spin_lock_irqsave(&np->lock, flags);
2308 /* Limit the number of outstanding tx packets. Set up all fragments, but
2309 * do not set the VALID bit on the first descriptor. Save a pointer
2310 * to that descriptor and also to the next skb_map element.
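/*
 * Flow in other words (summary, no extra driver logic): once
 * NV_TX_LIMIT_COUNT packets are in flight, further packets are queued
 * with VALID cleared so the hardware ignores them, and tx_change_owner
 * remembers the oldest deferred packet. As completions arrive,
 * nv_tx_flip_ownership() below sets VALID on one deferred packet at a
 * time and kicks the transmitter, keeping the in-flight count capped.
 */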
2313 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2314 if (!np->tx_change_owner)
2315 np->tx_change_owner = start_tx_ctx;
2317 /* remove VALID bit */
2318 tx_flags &= ~NV_TX2_VALID;
2319 start_tx_ctx->first_tx_desc = start_tx;
2320 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2321 np->tx_end_flip = np->put_tx_ctx;
2323 np->tx_pkts_in_progress++;
2328 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2329 np->put_tx.ex = put_tx;
2331 spin_unlock_irqrestore(&np->lock, flags);
2333 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2334 dev->name, entries, tx_flags_extra);
2337 for (j = 0; j < 64; j++) {
2339 dprintk("\n%03x:", j);
2340 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2345 dev->trans_start = jiffies;
2346 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2347 return NETDEV_TX_OK;
2350 static inline void nv_tx_flip_ownership(struct net_device *dev)
2352 struct fe_priv *np = netdev_priv(dev);
2354 np->tx_pkts_in_progress--;
2355 if (np->tx_change_owner) {
2356 np->tx_change_owner->first_tx_desc->flaglen |=
2357 cpu_to_le32(NV_TX2_VALID);
2358 np->tx_pkts_in_progress++;
2360 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2361 if (np->tx_change_owner == np->tx_end_flip)
2362 np->tx_change_owner = NULL;
2364 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2369 * nv_tx_done: check for completed packets, release the skbs.
2371 * Caller must own np->lock.
2373 static void nv_tx_done(struct net_device *dev)
2375 struct fe_priv *np = netdev_priv(dev);
2377 struct ring_desc* orig_get_tx = np->get_tx.orig;
2379 while ((np->get_tx.orig != np->put_tx.orig) &&
2380 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
2382 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2385 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2386 np->get_tx_ctx->dma_len,
2388 np->get_tx_ctx->dma = 0;
2390 if (np->desc_ver == DESC_VER_1) {
2391 if (flags & NV_TX_LASTPACKET) {
2392 if (flags & NV_TX_ERROR) {
2393 if (flags & NV_TX_UNDERFLOW)
2394 dev->stats.tx_fifo_errors++;
2395 if (flags & NV_TX_CARRIERLOST)
2396 dev->stats.tx_carrier_errors++;
2397 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2398 nv_legacybackoff_reseed(dev);
2399 dev->stats.tx_errors++;
2401 dev->stats.tx_packets++;
2402 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2404 dev_kfree_skb_any(np->get_tx_ctx->skb);
2405 np->get_tx_ctx->skb = NULL;
2408 if (flags & NV_TX2_LASTPACKET) {
2409 if (flags & NV_TX2_ERROR) {
2410 if (flags & NV_TX2_UNDERFLOW)
2411 dev->stats.tx_fifo_errors++;
2412 if (flags & NV_TX2_CARRIERLOST)
2413 dev->stats.tx_carrier_errors++;
2414 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2415 nv_legacybackoff_reseed(dev);
2416 dev->stats.tx_errors++;
2418 dev->stats.tx_packets++;
2419 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2421 dev_kfree_skb_any(np->get_tx_ctx->skb);
2422 np->get_tx_ctx->skb = NULL;
2425 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2426 np->get_tx.orig = np->first_tx.orig;
2427 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2428 np->get_tx_ctx = np->first_tx_ctx;
2430 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2432 netif_wake_queue(dev);
2436 static void nv_tx_done_optimized(struct net_device *dev, int limit)
2438 struct fe_priv *np = netdev_priv(dev);
2440 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2442 while ((np->get_tx.ex != np->put_tx.ex) &&
2443 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2446 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2449 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2450 np->get_tx_ctx->dma_len,
2452 np->get_tx_ctx->dma = 0;
2454 if (flags & NV_TX2_LASTPACKET) {
2455 if (!(flags & NV_TX2_ERROR))
2456 dev->stats.tx_packets++;
2458 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2459 if (np->driver_data & DEV_HAS_GEAR_MODE)
2460 nv_gear_backoff_reseed(dev);
2462 nv_legacybackoff_reseed(dev);
2466 dev_kfree_skb_any(np->get_tx_ctx->skb);
2467 np->get_tx_ctx->skb = NULL;
2470 nv_tx_flip_ownership(dev);
2473 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2474 np->get_tx.ex = np->first_tx.ex;
2475 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2476 np->get_tx_ctx = np->first_tx_ctx;
2478 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2480 netif_wake_queue(dev);
2485 * nv_tx_timeout: dev->tx_timeout function
2486 * Called with netif_tx_lock held.
2488 static void nv_tx_timeout(struct net_device *dev)
2490 struct fe_priv *np = netdev_priv(dev);
2491 u8 __iomem *base = get_hwbase(dev);
2494 if (np->msi_flags & NV_MSI_X_ENABLED)
2495 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2497 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2499 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2504 printk(KERN_INFO "%s: Ring at %lx\n",
2505 dev->name, (unsigned long)np->ring_addr);
2506 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2507 for (i = 0; i <= np->register_size; i += 32) {
2508 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2510 readl(base + i + 0), readl(base + i + 4),
2511 readl(base + i + 8), readl(base + i + 12),
2512 readl(base + i + 16), readl(base + i + 20),
2513 readl(base + i + 24), readl(base + i + 28));
2515 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2516 for (i = 0; i < np->tx_ring_size; i += 4) {
2517 if (!nv_optimized(np)) {
2518 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2520 le32_to_cpu(np->tx_ring.orig[i].buf),
2521 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2522 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2523 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2524 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2525 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2526 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2527 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2529 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2531 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2532 le32_to_cpu(np->tx_ring.ex[i].buflow),
2533 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2534 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2535 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2536 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2537 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2538 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2539 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2540 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2541 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2542 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2547 spin_lock_irq(&np->lock);
2549 /* 1) stop tx engine */
2552 /* 2) check that the packets were not sent already: */
2553 if (!nv_optimized(np))
2556 nv_tx_done_optimized(dev, np->tx_ring_size);
2558 /* 3) if there are dead entries: clear everything */
2559 if (np->get_tx_ctx != np->put_tx_ctx) {
2560 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2563 setup_hw_rings(dev, NV_SETUP_TX_RING);
2566 netif_wake_queue(dev);
2568 /* 4) restart tx engine */
2570 spin_unlock_irq(&np->lock);
2574 * Called when the nic notices a mismatch between the actual data len on the
2575 * wire and the len indicated in the 802 header
2577 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2579 int hdrlen; /* length of the 802 header */
2580 int protolen; /* length as stored in the proto field */
2582 /* 1) calculate len according to header */
2583 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2584 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2587 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2590 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2591 dev->name, datalen, protolen, hdrlen);
2592 if (protolen > ETH_DATA_LEN)
2593 return datalen; /* Value in proto field not a len, no checks possible */
2596 /* consistency checks: */
2597 if (datalen > ETH_ZLEN) {
2598 if (datalen >= protolen) {
2599 /* more data on wire than in 802 header, trim off the additional data */
2602 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2603 dev->name, protolen);
2606 /* less data on wire than mentioned in header.
2607 * Discard the packet.
2609 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2614 /* short packet. Accept only if 802 values are also short */
2615 if (protolen > ETH_ZLEN) {
2616 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2620 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2621 dev->name, datalen);
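/*
 * Decision summary for nv_getlen() (illustration), with protolen taken
 * from the 802 header and datalen reported by the hardware:
 *   protolen > ETH_DATA_LEN                     -> accept datalen (proto field is a type, not a length)
 *   datalen > ETH_ZLEN && datalen >= protolen   -> accept protolen (trim trailing padding)
 *   datalen > ETH_ZLEN && datalen <  protolen   -> discard (truncated frame)
 *   datalen <= ETH_ZLEN && protolen <= ETH_ZLEN -> accept datalen (legitimately short)
 *   datalen <= ETH_ZLEN && protolen >  ETH_ZLEN -> discard
 */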
2626 static int nv_rx_process(struct net_device *dev, int limit)
2628 struct fe_priv *np = netdev_priv(dev);
2631 struct sk_buff *skb;
2634 while((np->get_rx.orig != np->put_rx.orig) &&
2635 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2636 (rx_work < limit)) {
2638 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2642 * the packet is for us - immediately tear down the pci mapping.
2643 * TODO: check if a prefetch of the first cacheline improves the performance.
2646 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2647 np->get_rx_ctx->dma_len,
2648 PCI_DMA_FROMDEVICE);
2649 skb = np->get_rx_ctx->skb;
2650 np->get_rx_ctx->skb = NULL;
2654 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2655 for (j = 0; j < 64; j++) {
2657 dprintk("\n%03x:", j);
2658 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2662 /* look at what we actually got: */
2663 if (np->desc_ver == DESC_VER_1) {
2664 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2665 len = flags & LEN_MASK_V1;
2666 if (unlikely(flags & NV_RX_ERROR)) {
2667 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2668 len = nv_getlen(dev, skb->data, len);
2670 dev->stats.rx_errors++;
2675 /* framing errors are soft errors */
2676 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2677 if (flags & NV_RX_SUBSTRACT1) {
2681 /* the rest are hard errors */
2683 if (flags & NV_RX_MISSEDFRAME)
2684 dev->stats.rx_missed_errors++;
2685 if (flags & NV_RX_CRCERR)
2686 dev->stats.rx_crc_errors++;
2687 if (flags & NV_RX_OVERFLOW)
2688 dev->stats.rx_over_errors++;
2689 dev->stats.rx_errors++;
2699 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2700 len = flags & LEN_MASK_V2;
2701 if (unlikely(flags & NV_RX2_ERROR)) {
2702 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2703 len = nv_getlen(dev, skb->data, len);
2705 dev->stats.rx_errors++;
2710 /* framing errors are soft errors */
2711 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2712 if (flags & NV_RX2_SUBSTRACT1) {
2716 /* the rest are hard errors */
2718 if (flags & NV_RX2_CRCERR)
2719 dev->stats.rx_crc_errors++;
2720 if (flags & NV_RX2_OVERFLOW)
2721 dev->stats.rx_over_errors++;
2722 dev->stats.rx_errors++;
2727 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2728 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2729 skb->ip_summed = CHECKSUM_UNNECESSARY;
2735 /* got a valid packet - forward it to the network core */
2737 skb->protocol = eth_type_trans(skb, dev);
2738 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2739 dev->name, len, skb->protocol);
2740 #ifdef CONFIG_FORCEDETH_NAPI
2741 netif_receive_skb(skb);
2745 dev->stats.rx_packets++;
2746 dev->stats.rx_bytes += len;
2748 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2749 np->get_rx.orig = np->first_rx.orig;
2750 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2751 np->get_rx_ctx = np->first_rx_ctx;
2759 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2761 struct fe_priv *np = netdev_priv(dev);
2765 struct sk_buff *skb;
2768 while((np->get_rx.ex != np->put_rx.ex) &&
2769 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2770 (rx_work < limit)) {
2772 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2776 * the packet is for us - immediately tear down the pci mapping.
2777 * TODO: check if a prefetch of the first cacheline improves the performance.
2780 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2781 np->get_rx_ctx->dma_len,
2782 PCI_DMA_FROMDEVICE);
2783 skb = np->get_rx_ctx->skb;
2784 np->get_rx_ctx->skb = NULL;
2788 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2789 for (j = 0; j < 64; j++) {
2791 dprintk("\n%03x:", j);
2792 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2796 /* look at what we actually got: */
2797 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2798 len = flags & LEN_MASK_V2;
2799 if (unlikely(flags & NV_RX2_ERROR)) {
2800 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2801 len = nv_getlen(dev, skb->data, len);
2807 /* framing errors are soft errors */
2808 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2809 if (flags & NV_RX2_SUBSTRACT1) {
2813 /* the rest are hard errors */
2820 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2821 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2822 skb->ip_summed = CHECKSUM_UNNECESSARY;
2824 /* got a valid packet - forward it to the network core */
2826 skb->protocol = eth_type_trans(skb, dev);
2827 prefetch(skb->data);
2829 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2830 dev->name, len, skb->protocol);
2832 if (likely(!np->vlangrp)) {
2833 #ifdef CONFIG_FORCEDETH_NAPI
2834 netif_receive_skb(skb);
2839 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2840 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2841 #ifdef CONFIG_FORCEDETH_NAPI
2842 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2843 vlanflags & NV_RX3_VLAN_TAG_MASK);
2845 vlan_hwaccel_rx(skb, np->vlangrp,
2846 vlanflags & NV_RX3_VLAN_TAG_MASK);
2849 #ifdef CONFIG_FORCEDETH_NAPI
2850 netif_receive_skb(skb);
2857 dev->stats.rx_packets++;
2858 dev->stats.rx_bytes += len;
2863 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2864 np->get_rx.ex = np->first_rx.ex;
2865 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2866 np->get_rx_ctx = np->first_rx_ctx;
2874 static void set_bufsize(struct net_device *dev)
2876 struct fe_priv *np = netdev_priv(dev);
2878 if (dev->mtu <= ETH_DATA_LEN)
2879 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2881 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2885 * nv_change_mtu: dev->change_mtu function
2886 * Called with dev_base_lock held for read.
2888 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2890 struct fe_priv *np = netdev_priv(dev);
2893 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2899 /* return early if the buffer sizes will not change */
2900 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2902 if (old_mtu == new_mtu)
2905 /* synchronized against open: rtnl_lock() held by caller */
2906 if (netif_running(dev)) {
2907 u8 __iomem *base = get_hwbase(dev);
2909 * It seems that the nic preloads valid ring entries into an
2910 * internal buffer. The procedure for flushing everything is
2911 * guessed; there is probably a simpler approach.
2912 * Changing the MTU is a rare event, so it shouldn't matter.
2914 nv_disable_irq(dev);
2915 netif_tx_lock_bh(dev);
2916 netif_addr_lock(dev);
2917 spin_lock(&np->lock);
2921 /* drain rx queue */
2923 /* reinit driver view of the rx queue */
2925 if (nv_init_ring(dev)) {
2926 if (!np->in_shutdown)
2927 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2929 /* reinit nic view of the rx queue */
2930 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2931 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2932 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2933 base + NvRegRingSizes);
2935 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2938 /* restart rx engine */
2940 spin_unlock(&np->lock);
2941 netif_addr_unlock(dev);
2942 netif_tx_unlock_bh(dev);
2948 static void nv_copy_mac_to_hw(struct net_device *dev)
2950 u8 __iomem *base = get_hwbase(dev);
2953 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2954 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2955 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2957 writel(mac[0], base + NvRegMacAddrA);
2958 writel(mac[1], base + NvRegMacAddrB);
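/*
 * Worked example (illustration only): for dev_addr 00:11:22:33:44:55
 * the packing above yields
 *   mac[0] = 0x33221100   (bytes 0..3, byte 0 in bits 7:0)
 *   mac[1] = 0x00005544   (bytes 4..5)
 */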
2962 * nv_set_mac_address: dev->set_mac_address function
2963 * Called with rtnl_lock() held.
2965 static int nv_set_mac_address(struct net_device *dev, void *addr)
2967 struct fe_priv *np = netdev_priv(dev);
2968 struct sockaddr *macaddr = (struct sockaddr*)addr;
2970 if (!is_valid_ether_addr(macaddr->sa_data))
2971 return -EADDRNOTAVAIL;
2973 /* synchronized against open: rtnl_lock() held by caller */
2974 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2976 if (netif_running(dev)) {
2977 netif_tx_lock_bh(dev);
2978 netif_addr_lock(dev);
2979 spin_lock_irq(&np->lock);
2981 /* stop rx engine */
2984 /* set mac address */
2985 nv_copy_mac_to_hw(dev);
2987 /* restart rx engine */
2989 spin_unlock_irq(&np->lock);
2990 netif_addr_unlock(dev);
2991 netif_tx_unlock_bh(dev);
2993 nv_copy_mac_to_hw(dev);
2999 * nv_set_multicast: dev->set_multicast function
3000 * Called with netif_tx_lock held.
3002 static void nv_set_multicast(struct net_device *dev)
3004 struct fe_priv *np = netdev_priv(dev);
3005 u8 __iomem *base = get_hwbase(dev);
3008 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3010 memset(addr, 0, sizeof(addr));
3011 memset(mask, 0, sizeof(mask));
3013 if (dev->flags & IFF_PROMISC) {
3014 pff |= NVREG_PFF_PROMISC;
3016 pff |= NVREG_PFF_MYADDR;
3018 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
3022 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3023 if (dev->flags & IFF_ALLMULTI) {
3024 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3026 struct dev_mc_list *walk;
3028 walk = dev->mc_list;
3029 while (walk != NULL) {
3031 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
3032 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
3040 addr[0] = alwaysOn[0];
3041 addr[1] = alwaysOn[1];
3042 mask[0] = alwaysOn[0] | alwaysOff[0];
3043 mask[1] = alwaysOn[1] | alwaysOff[1];
3045 mask[0] = NVREG_MCASTMASKA_NONE;
3046 mask[1] = NVREG_MCASTMASKB_NONE;
3049 addr[0] |= NVREG_MCASTADDRA_FORCE;
3050 pff |= NVREG_PFF_ALWAYS;
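/*
 * Illustrative note: the walk over dev->mc_list above accumulates which
 * bits are set in every multicast address (alwaysOn) and which are
 * clear in every one (alwaysOff); the hardware mask therefore contains
 * exactly the bits that agree across all addresses, while disagreeing
 * bits become don't-cares. Example: for 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:02 only the two low bits of the last byte disagree,
 * so they are masked out and every other bit must match exactly.
 */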
3051 spin_lock_irq(&np->lock);
3053 writel(addr[0], base + NvRegMulticastAddrA);
3054 writel(addr[1], base + NvRegMulticastAddrB);
3055 writel(mask[0], base + NvRegMulticastMaskA);
3056 writel(mask[1], base + NvRegMulticastMaskB);
3057 writel(pff, base + NvRegPacketFilterFlags);
3058 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3061 spin_unlock_irq(&np->lock);
3064 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3066 struct fe_priv *np = netdev_priv(dev);
3067 u8 __iomem *base = get_hwbase(dev);
3069 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3071 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3072 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3073 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3074 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3075 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3077 writel(pff, base + NvRegPacketFilterFlags);
3080 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3081 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3082 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3083 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3084 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3085 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3086 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3087 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3088 /* limit the number of tx pause frames to a default of 8 */
3089 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3091 writel(pause_enable, base + NvRegTxPauseFrame);
3092 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3093 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3095 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3096 writel(regmisc, base + NvRegMisc1);
3102 * nv_update_linkspeed: Setup the MAC according to the link partner
3103 * @dev: Network device to be configured
3105 * The function queries the PHY and checks if there is a link partner.
3106 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3107 * set to 10 MBit HD.
3109 * The function returns 0 if there is no link partner and 1 if there is
3110 * a good link partner.
3112 static int nv_update_linkspeed(struct net_device *dev)
3114 struct fe_priv *np = netdev_priv(dev);
3115 u8 __iomem *base = get_hwbase(dev);
3118 int adv_lpa, adv_pause, lpa_pause;
3119 int newls = np->linkspeed;
3120 int newdup = np->duplex;
3123 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3127 /* BMSR_LSTATUS is latched, read it twice:
3128 * we want the current value.
3130 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3131 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3133 if (!(mii_status & BMSR_LSTATUS)) {
3134 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3136 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3142 if (np->autoneg == 0) {
3143 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3144 dev->name, np->fixed_mode);
3145 if (np->fixed_mode & LPA_100FULL) {
3146 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3148 } else if (np->fixed_mode & LPA_100HALF) {
3149 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3151 } else if (np->fixed_mode & LPA_10FULL) {
3152 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3155 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3161 /* check auto negotiation is complete */
3162 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3163 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3164 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3167 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3171 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3172 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3173 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3174 dev->name, adv, lpa);
3177 if (np->gigabit == PHY_GIGABIT) {
3178 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3179 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3181 if ((control_1000 & ADVERTISE_1000FULL) &&
3182 (status_1000 & LPA_1000FULL)) {
3183 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3185 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3191 /* FIXME: handle parallel detection properly */
3192 adv_lpa = lpa & adv;
3193 if (adv_lpa & LPA_100FULL) {
3194 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3196 } else if (adv_lpa & LPA_100HALF) {
3197 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3199 } else if (adv_lpa & LPA_10FULL) {
3200 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3202 } else if (adv_lpa & LPA_10HALF) {
3203 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3206 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3207 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3212 if (np->duplex == newdup && np->linkspeed == newls)
3215 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3216 dev->name, np->linkspeed, np->duplex, newls, newdup);
3218 np->duplex = newdup;
3219 np->linkspeed = newls;
3221 /* The transmitter and receiver must be restarted for a safe update */
3222 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3223 txrxFlags |= NV_RESTART_TX;
3226 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3227 txrxFlags |= NV_RESTART_RX;
3231 if (np->gigabit == PHY_GIGABIT) {
3232 phyreg = readl(base + NvRegSlotTime);
3233 phyreg &= ~(0x3FF00);
3234 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3235 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3236 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3237 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3238 phyreg |= NVREG_SLOTTIME_1000_FULL;
3239 writel(phyreg, base + NvRegSlotTime);
3242 phyreg = readl(base + NvRegPhyInterface);
3243 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3244 if (np->duplex == 0)
3246 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3248 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3250 writel(phyreg, base + NvRegPhyInterface);
3252 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3253 if (phyreg & PHY_RGMII) {
3254 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3255 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3257 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3258 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3259 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3261 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3263 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3267 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3268 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3270 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3272 writel(txreg, base + NvRegTxDeferral);
3274 if (np->desc_ver == DESC_VER_1) {
3275 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3277 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3278 txreg = NVREG_TX_WM_DESC2_3_1000;
3280 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3282 writel(txreg, base + NvRegTxWatermark);
3284 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3287 writel(np->linkspeed, base + NvRegLinkSpeed);
3291 /* setup pause frame */
3292 if (np->duplex != 0) {
3293 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3294 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3295 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
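/*
 * The switch below resolves pause usage the standard 802.3 way
 * (summary for illustration; TX pause is additionally gated on
 * NV_PAUSEFRAME_TX_REQ):
 *   we advertise CAP only  -> rx(+tx) pause if partner sends CAP
 *   we advertise ASYM only -> tx pause if partner sends CAP+ASYM
 *   we advertise CAP+ASYM  -> rx(+tx) pause if partner sends CAP,
 *                             rx pause only if partner sends ASYM alone
 */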
3297 switch (adv_pause) {
3298 case ADVERTISE_PAUSE_CAP:
3299 if (lpa_pause & LPA_PAUSE_CAP) {
3300 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3301 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3302 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3305 case ADVERTISE_PAUSE_ASYM:
3306 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3308 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3311 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3312 if (lpa_pause & LPA_PAUSE_CAP)
3314 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3315 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3316 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3318 if (lpa_pause == LPA_PAUSE_ASYM)
3320 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3325 pause_flags = np->pause_flags;
3328 nv_update_pause(dev, pause_flags);
3330 if (txrxFlags & NV_RESTART_TX)
3332 if (txrxFlags & NV_RESTART_RX)
3338 static void nv_linkchange(struct net_device *dev)
3340 if (nv_update_linkspeed(dev)) {
3341 if (!netif_carrier_ok(dev)) {
3342 netif_carrier_on(dev);
3343 printk(KERN_INFO "%s: link up.\n", dev->name);
3347 if (netif_carrier_ok(dev)) {
3348 netif_carrier_off(dev);
3349 printk(KERN_INFO "%s: link down.\n", dev->name);
3355 static void nv_link_irq(struct net_device *dev)
3357 u8 __iomem *base = get_hwbase(dev);
3360 miistat = readl(base + NvRegMIIStatus);
3361 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3362 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3364 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3366 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3369 static void nv_msi_workaround(struct fe_priv *np)
3372 /* Need to toggle the msi irq mask within the ethernet device;
3373 * otherwise future interrupts will not be detected.
3375 if (np->msi_flags & NV_MSI_ENABLED) {
3376 u8 __iomem *base = np->base;
3378 writel(0, base + NvRegMSIIrqMask);
3379 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3383 static irqreturn_t nv_nic_irq(int foo, void *data)
3385 struct net_device *dev = (struct net_device *) data;
3386 struct fe_priv *np = netdev_priv(dev);
3387 u8 __iomem *base = get_hwbase(dev);
3391 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3394 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3395 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3396 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3398 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3399 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3401 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3402 if (!(events & np->irqmask))
3405 nv_msi_workaround(np);
3407 spin_lock(&np->lock);
3409 spin_unlock(&np->lock);
3411 #ifdef CONFIG_FORCEDETH_NAPI
3412 if (events & NVREG_IRQ_RX_ALL) {
3413 spin_lock(&np->lock);
3414 napi_schedule(&np->napi);
3416 /* Disable further receive irqs */
3417 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3419 if (np->msi_flags & NV_MSI_X_ENABLED)
3420 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3422 writel(np->irqmask, base + NvRegIrqMask);
3423 spin_unlock(&np->lock);
3426 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3427 if (unlikely(nv_alloc_rx(dev))) {
3428 spin_lock(&np->lock);
3429 if (!np->in_shutdown)
3430 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3431 spin_unlock(&np->lock);
3435 if (unlikely(events & NVREG_IRQ_LINK)) {
3436 spin_lock(&np->lock);
3438 spin_unlock(&np->lock);
3440 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3441 spin_lock(&np->lock);
3443 spin_unlock(&np->lock);
3444 np->link_timeout = jiffies + LINK_TIMEOUT;
3446 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3447 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3450 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3451 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3454 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3455 spin_lock(&np->lock);
3456 /* disable interrupts on the nic */
3457 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3458 writel(0, base + NvRegIrqMask);
3460 writel(np->irqmask, base + NvRegIrqMask);
3463 if (!np->in_shutdown) {
3464 np->nic_poll_irq = np->irqmask;
3465 np->recover_error = 1;
3466 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3468 spin_unlock(&np->lock);
3471 if (unlikely(i > max_interrupt_work)) {
3472 spin_lock(&np->lock);
3473 /* disable interrupts on the nic */
3474 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3475 writel(0, base + NvRegIrqMask);
3477 writel(np->irqmask, base + NvRegIrqMask);
3480 if (!np->in_shutdown) {
3481 np->nic_poll_irq = np->irqmask;
3482 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3484 spin_unlock(&np->lock);
3485 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3490 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3492 return IRQ_RETVAL(i);
3496 * All _optimized functions are used to help increase performance
3497 * (reduce CPU usage and increase throughput). They use descriptor version 3,
3498 * compiler directives, and fewer memory accesses.
3500 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3502 struct net_device *dev = (struct net_device *) data;
3503 struct fe_priv *np = netdev_priv(dev);
3504 u8 __iomem *base = get_hwbase(dev);
3508 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3511 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3512 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3513 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3515 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3516 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3518 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3519 if (!(events & np->irqmask))
3522 nv_msi_workaround(np);
3524 spin_lock(&np->lock);
3525 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3526 spin_unlock(&np->lock);
3528 #ifdef CONFIG_FORCEDETH_NAPI
3529 if (events & NVREG_IRQ_RX_ALL) {
3530 spin_lock(&np->lock);
3531 napi_schedule(&np->napi);
3533 /* Disable further receive irqs */
3534 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3536 if (np->msi_flags & NV_MSI_X_ENABLED)
3537 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3539 writel(np->irqmask, base + NvRegIrqMask);
3540 spin_unlock(&np->lock);
3543 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3544 if (unlikely(nv_alloc_rx_optimized(dev))) {
3545 spin_lock(&np->lock);
3546 if (!np->in_shutdown)
3547 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3548 spin_unlock(&np->lock);
3552 if (unlikely(events & NVREG_IRQ_LINK)) {
3553 spin_lock(&np->lock);
3555 spin_unlock(&np->lock);
3557 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3558 spin_lock(&np->lock);
3560 spin_unlock(&np->lock);
3561 np->link_timeout = jiffies + LINK_TIMEOUT;
3563 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3564 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3567 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3568 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3571 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3572 spin_lock(&np->lock);
3573 /* disable interrupts on the nic */
3574 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3575 writel(0, base + NvRegIrqMask);
3577 writel(np->irqmask, base + NvRegIrqMask);
3580 if (!np->in_shutdown) {
3581 np->nic_poll_irq = np->irqmask;
3582 np->recover_error = 1;
3583 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3585 spin_unlock(&np->lock);
3589 if (unlikely(i > max_interrupt_work)) {
3590 spin_lock(&np->lock);
3591 /* disable interrupts on the nic */
3592 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3593 writel(0, base + NvRegIrqMask);
3595 writel(np->irqmask, base + NvRegIrqMask);
3598 if (!np->in_shutdown) {
3599 np->nic_poll_irq = np->irqmask;
3600 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3602 spin_unlock(&np->lock);
3603 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
3608 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3610 return IRQ_RETVAL(i);
3613 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3615 struct net_device *dev = (struct net_device *) data;
3616 struct fe_priv *np = netdev_priv(dev);
3617 u8 __iomem *base = get_hwbase(dev);
3620 unsigned long flags;
3622 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3625 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3626 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3627 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3628 if (!(events & np->irqmask))
3631 spin_lock_irqsave(&np->lock, flags);
3632 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3633 spin_unlock_irqrestore(&np->lock, flags);
3635 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3636 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3639 if (unlikely(i > max_interrupt_work)) {
3640 spin_lock_irqsave(&np->lock, flags);
3641 /* disable interrupts on the nic */
3642 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3645 if (!np->in_shutdown) {
3646 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3647 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3649 spin_unlock_irqrestore(&np->lock, flags);
3650 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3655 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3657 return IRQ_RETVAL(i);
3660 #ifdef CONFIG_FORCEDETH_NAPI
3661 static int nv_napi_poll(struct napi_struct *napi, int budget)
3663 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3664 struct net_device *dev = np->dev;
3665 u8 __iomem *base = get_hwbase(dev);
3666 unsigned long flags;
3669 if (!nv_optimized(np)) {
3670 pkts = nv_rx_process(dev, budget);
3671 retcode = nv_alloc_rx(dev);
3673 pkts = nv_rx_process_optimized(dev, budget);
3674 retcode = nv_alloc_rx_optimized(dev);
3678 spin_lock_irqsave(&np->lock, flags);
3679 if (!np->in_shutdown)
3680 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3681 spin_unlock_irqrestore(&np->lock, flags);
3684 if (pkts < budget) {
3685 /* re-enable receive interrupts */
3686 spin_lock_irqsave(&np->lock, flags);
3688 __napi_complete(napi);
3690 np->irqmask |= NVREG_IRQ_RX_ALL;
3691 if (np->msi_flags & NV_MSI_X_ENABLED)
3692 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3694 writel(np->irqmask, base + NvRegIrqMask);
3696 spin_unlock_irqrestore(&np->lock, flags);
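/*
 * This follows the usual NAPI contract: process at most 'budget'
 * packets per poll; if the ring drains before the budget is spent,
 * complete NAPI and re-enable the receive interrupt so the next packet
 * raises an irq again, otherwise stay in polling mode and let the core
 * reschedule the poll.
 */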
3702 #ifdef CONFIG_FORCEDETH_NAPI
3703 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3705 struct net_device *dev = (struct net_device *) data;
3706 struct fe_priv *np = netdev_priv(dev);
3707 u8 __iomem *base = get_hwbase(dev);
3710 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3713 /* disable receive interrupts on the nic */
3714 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3716 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3717 napi_schedule(&np->napi);
3722 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3724 struct net_device *dev = (struct net_device *) data;
3725 struct fe_priv *np = netdev_priv(dev);
3726 u8 __iomem *base = get_hwbase(dev);
3729 unsigned long flags;
3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3734 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3735 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3736 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3737 if (!(events & np->irqmask))
3740 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3741 if (unlikely(nv_alloc_rx_optimized(dev))) {
3742 spin_lock_irqsave(&np->lock, flags);
3743 if (!np->in_shutdown)
3744 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3745 spin_unlock_irqrestore(&np->lock, flags);
3749 if (unlikely(i > max_interrupt_work)) {
3750 spin_lock_irqsave(&np->lock, flags);
3751 /* disable interrupts on the nic */
3752 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3755 if (!np->in_shutdown) {
3756 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3757 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3759 spin_unlock_irqrestore(&np->lock, flags);
3760 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3764 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3766 return IRQ_RETVAL(i);
3770 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3772 struct net_device *dev = (struct net_device *) data;
3773 struct fe_priv *np = netdev_priv(dev);
3774 u8 __iomem *base = get_hwbase(dev);
3777 unsigned long flags;
3779 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3782 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3783 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3784 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3785 if (!(events & np->irqmask))
3788 /* check tx in case we reached max loop limit in tx isr */
3789 spin_lock_irqsave(&np->lock, flags);
3790 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3791 spin_unlock_irqrestore(&np->lock, flags);
3793 if (events & NVREG_IRQ_LINK) {
3794 spin_lock_irqsave(&np->lock, flags);
3796 spin_unlock_irqrestore(&np->lock, flags);
3798 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3799 spin_lock_irqsave(&np->lock, flags);
3801 spin_unlock_irqrestore(&np->lock, flags);
3802 np->link_timeout = jiffies + LINK_TIMEOUT;
3804 if (events & NVREG_IRQ_RECOVER_ERROR) {
3805 spin_lock_irq(&np->lock);
3806 /* disable interrupts on the nic */
3807 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3810 if (!np->in_shutdown) {
3811 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3812 np->recover_error = 1;
3813 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3815 spin_unlock_irq(&np->lock);
3818 if (events & (NVREG_IRQ_UNKNOWN)) {
3819 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3822 if (unlikely(i > max_interrupt_work)) {
3823 spin_lock_irqsave(&np->lock, flags);
3824 /* disable interrupts on the nic */
3825 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3828 if (!np->in_shutdown) {
3829 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3830 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3832 spin_unlock_irqrestore(&np->lock, flags);
3833 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3838 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3840 return IRQ_RETVAL(i);
3843 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3845 struct net_device *dev = (struct net_device *) data;
3846 struct fe_priv *np = netdev_priv(dev);
3847 u8 __iomem *base = get_hwbase(dev);
3850 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3852 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3853 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3854 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3856 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3857 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3860 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3861 if (!(events & NVREG_IRQ_TIMER))
3862 return IRQ_RETVAL(0);
3864 nv_msi_workaround(np);
3866 spin_lock(&np->lock);
3868 spin_unlock(&np->lock);
3870 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3872 return IRQ_RETVAL(1);
3875 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3877 u8 __iomem *base = get_hwbase(dev);
3881 /* Each interrupt bit can be mapped to an MSI-X vector (4 bits per interrupt).
3882 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3883 * the remaining 8 interrupts.
3885 for (i = 0; i < 8; i++) {
3886 if ((irqmask >> i) & 0x1) {
3887 msixmap |= vector << (i << 2);
3890 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3893 for (i = 0; i < 8; i++) {
3894 if ((irqmask >> (i + 8)) & 0x1) {
3895 msixmap |= vector << (i << 2);
3898 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
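/*
 * Worked example (illustration only): mapping vector 2 onto interrupt
 * bits 0 and 3 of the low group packs the vector number into the
 * matching 4-bit nibbles of MSIXMap0:
 *   msixmap = (2 << (0 << 2)) | (2 << (3 << 2)) = 0x00002002
 */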
3901 static int nv_request_irq(struct net_device *dev, int intr_test)
3903 struct fe_priv *np = get_nvpriv(dev);
3904 u8 __iomem *base = get_hwbase(dev);
3907 irqreturn_t (*handler)(int foo, void *data);
3910 handler = nv_nic_irq_test;
3912 if (nv_optimized(np))
3913 handler = nv_nic_irq_optimized;
3915 handler = nv_nic_irq;
3918 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3919 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3920 np->msi_x_entry[i].entry = i;
3922 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3923 np->msi_flags |= NV_MSI_X_ENABLED;
3924 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3925 /* Request irq for rx handling */
3926 sprintf(np->name_rx, "%s-rx", dev->name);
3927 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3928 &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3929 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3930 pci_disable_msix(np->pci_dev);
3931 np->msi_flags &= ~NV_MSI_X_ENABLED;
3934 /* Request irq for tx handling */
3935 sprintf(np->name_tx, "%s-tx", dev->name);
3936 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3937 &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3938 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3939 pci_disable_msix(np->pci_dev);
3940 np->msi_flags &= ~NV_MSI_X_ENABLED;
3943 /* Request irq for link and timer handling */
3944 sprintf(np->name_other, "%s-other", dev->name);
3945 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3946 &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3947 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3948 pci_disable_msix(np->pci_dev);
3949 np->msi_flags &= ~NV_MSI_X_ENABLED;
3952 /* map interrupts to their respective vector */
3953 writel(0, base + NvRegMSIXMap0);
3954 writel(0, base + NvRegMSIXMap1);
3955 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3956 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3957 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3959 /* Request irq for all interrupts */
3960 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3961 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3962 pci_disable_msix(np->pci_dev);
3963 np->msi_flags &= ~NV_MSI_X_ENABLED;
3967 /* map interrupts to vector 0 */
3968 writel(0, base + NvRegMSIXMap0);
3969 writel(0, base + NvRegMSIXMap1);
3973 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3974 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3975 np->msi_flags |= NV_MSI_ENABLED;
3976 dev->irq = np->pci_dev->irq;
3977 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3978 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3979 pci_disable_msi(np->pci_dev);
3980 np->msi_flags &= ~NV_MSI_ENABLED;
3981 dev->irq = np->pci_dev->irq;
3985 /* map interrupts to vector 0 */
3986 writel(0, base + NvRegMSIMap0);
3987 writel(0, base + NvRegMSIMap1);
3988 /* enable msi vector 0 */
3989 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3993 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4000 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4002 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4007 static void nv_free_irq(struct net_device *dev)
4009 struct fe_priv *np = get_nvpriv(dev);
4012 if (np->msi_flags & NV_MSI_X_ENABLED) {
4013 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
4014 free_irq(np->msi_x_entry[i].vector, dev);
4016 pci_disable_msix(np->pci_dev);
4017 np->msi_flags &= ~NV_MSI_X_ENABLED;
4019 free_irq(np->pci_dev->irq, dev);
4020 if (np->msi_flags & NV_MSI_ENABLED) {
4021 pci_disable_msi(np->pci_dev);
4022 np->msi_flags &= ~NV_MSI_ENABLED;
4027 static void nv_do_nic_poll(unsigned long data)
4029 struct net_device *dev = (struct net_device *) data;
4030 struct fe_priv *np = netdev_priv(dev);
4031 u8 __iomem *base = get_hwbase(dev);
4035 * First disable the host irq(s), then
4036 * re-enable interrupts on the nic; this must happen before calling
4037 * nv_nic_irq, because that handler may decide to do otherwise
4040 if (!using_multi_irqs(dev)) {
4041 if (np->msi_flags & NV_MSI_X_ENABLED)
4042 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4044 disable_irq_lockdep(np->pci_dev->irq);
4047 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4048 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4049 mask |= NVREG_IRQ_RX_ALL;
4051 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4052 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4053 mask |= NVREG_IRQ_TX_ALL;
4055 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4056 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4057 mask |= NVREG_IRQ_OTHER;
4060 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
4062 if (np->recover_error) {
4063 np->recover_error = 0;
4064 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
4065 if (netif_running(dev)) {
4066 netif_tx_lock_bh(dev);
4067 netif_addr_lock(dev);
4068 spin_lock(&np->lock);
4072 /* drain rx queue */
4074 /* reinit driver view of the rx queue */
4076 if (nv_init_ring(dev)) {
4077 if (!np->in_shutdown)
4078 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4080 /* reinit nic view of the rx queue */
4081 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4082 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4083 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4084 base + NvRegRingSizes);
4086 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4089 /* restart rx engine */
4091 spin_unlock(&np->lock);
4092 netif_addr_unlock(dev);
4093 netif_tx_unlock_bh(dev);
4097 writel(mask, base + NvRegIrqMask);
4100 if (!using_multi_irqs(dev)) {
4101 np->nic_poll_irq = 0;
4102 if (nv_optimized(np))
4103 nv_nic_irq_optimized(0, dev);
4106 if (np->msi_flags & NV_MSI_X_ENABLED)
4107 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4109 enable_irq_lockdep(np->pci_dev->irq);
4111 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4112 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4113 nv_nic_irq_rx(0, dev);
4114 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4116 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4117 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4118 nv_nic_irq_tx(0, dev);
4119 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4121 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4122 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4123 nv_nic_irq_other(0, dev);
4124 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4130 #ifdef CONFIG_NET_POLL_CONTROLLER
4131 static void nv_poll_controller(struct net_device *dev)
4133 nv_do_nic_poll((unsigned long) dev);
4137 static void nv_do_stats_poll(unsigned long data)
4139 struct net_device *dev = (struct net_device *) data;
4140 struct fe_priv *np = netdev_priv(dev);
4142 nv_get_hw_stats(dev);
4144 if (!np->in_shutdown)
4145 mod_timer(&np->stats_poll,
4146 round_jiffies(jiffies + STATS_INTERVAL));
4149 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4151 struct fe_priv *np = netdev_priv(dev);
4152 strcpy(info->driver, DRV_NAME);
4153 strcpy(info->version, FORCEDETH_VERSION);
4154 strcpy(info->bus_info, pci_name(np->pci_dev));
4157 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4159 struct fe_priv *np = netdev_priv(dev);
4160 wolinfo->supported = WAKE_MAGIC;
4162 spin_lock_irq(&np->lock);
4164 wolinfo->wolopts = WAKE_MAGIC;
4165 spin_unlock_irq(&np->lock);
4168 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4170 struct fe_priv *np = netdev_priv(dev);
4171 u8 __iomem *base = get_hwbase(dev);
4174 if (wolinfo->wolopts == 0) {
4176 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4178 flags = NVREG_WAKEUPFLAGS_ENABLE;
4180 if (netif_running(dev)) {
4181 spin_lock_irq(&np->lock);
4182 writel(flags, base + NvRegWakeUpFlags);
4183 spin_unlock_irq(&np->lock);
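/* Usage sketch (assuming an interface named eth0): nv_get_wol and
 * nv_set_wol back the standard ethtool WoL interface, e.g.
 *   ethtool -s eth0 wol g   # enable wake on magic packet
 *   ethtool -s eth0 wol d   # disable wake-up
 *   ethtool eth0            # the "Wake-on:" line reports d or g
 */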
4188 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4190 struct fe_priv *np = netdev_priv(dev);
4193 spin_lock_irq(&np->lock);
4194 ecmd->port = PORT_MII;
4195 if (!netif_running(dev)) {
4196 /* We do not track link speed / duplex setting if the
4197 * interface is disabled. Force a link check */
4198 if (nv_update_linkspeed(dev)) {
4199 if (!netif_carrier_ok(dev))
4200 netif_carrier_on(dev);
4202 if (netif_carrier_ok(dev))
4203 netif_carrier_off(dev);
4207 if (netif_carrier_ok(dev)) {
4208 switch (np->linkspeed & NVREG_LINKSPEED_MASK) {
4209 case NVREG_LINKSPEED_10:
4210 ecmd->speed = SPEED_10;
4212 case NVREG_LINKSPEED_100:
4213 ecmd->speed = SPEED_100;
4215 case NVREG_LINKSPEED_1000:
4216 ecmd->speed = SPEED_1000;
4219 ecmd->duplex = DUPLEX_HALF;
4221 ecmd->duplex = DUPLEX_FULL;
4227 ecmd->autoneg = np->autoneg;
4229 ecmd->advertising = ADVERTISED_MII;
4231 ecmd->advertising |= ADVERTISED_Autoneg;
4232 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4233 if (adv & ADVERTISE_10HALF)
4234 ecmd->advertising |= ADVERTISED_10baseT_Half;
4235 if (adv & ADVERTISE_10FULL)
4236 ecmd->advertising |= ADVERTISED_10baseT_Full;
4237 if (adv & ADVERTISE_100HALF)
4238 ecmd->advertising |= ADVERTISED_100baseT_Half;
4239 if (adv & ADVERTISE_100FULL)
4240 ecmd->advertising |= ADVERTISED_100baseT_Full;
4241 if (np->gigabit == PHY_GIGABIT) {
4242 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4243 if (adv & ADVERTISE_1000FULL)
4244 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4247 ecmd->supported = (SUPPORTED_Autoneg |
4248 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4249 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4251 if (np->gigabit == PHY_GIGABIT)
4252 ecmd->supported |= SUPPORTED_1000baseT_Full;
4254 ecmd->phy_address = np->phyaddr;
4255 ecmd->transceiver = XCVR_EXTERNAL;
4257 /* ignore maxtxpkt, maxrxpkt for now */
4258 spin_unlock_irq(&np->lock);
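/* Usage sketch (assuming an interface named eth0):
 *   ethtool eth0                                      # nv_get_settings
 *   ethtool -s eth0 speed 100 duplex full autoneg off # nv_set_settings
 * Note that forcing speed 1000 with autoneg off is rejected by
 * nv_set_settings below, by design.
 */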
4262 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4264 struct fe_priv *np = netdev_priv(dev);
4266 if (ecmd->port != PORT_MII)
4268 if (ecmd->transceiver != XCVR_EXTERNAL)
4270 if (ecmd->phy_address != np->phyaddr) {
4271 /* TODO: support switching between multiple phys. Should be
4272 * trivial, but not enabled due to lack of test hardware. */
4275 if (ecmd->autoneg == AUTONEG_ENABLE) {
4278 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4279 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4280 if (np->gigabit == PHY_GIGABIT)
4281 mask |= ADVERTISED_1000baseT_Full;
4283 if ((ecmd->advertising & mask) == 0)
4286 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4287 /* Note: with autonegotiation disabled, forcing speed 1000 is
4288 * intentionally forbidden - no one should need that. */
4290 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4292 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4298 netif_carrier_off(dev);
4299 if (netif_running(dev)) {
4300 unsigned long flags;
4302 nv_disable_irq(dev);
4303 netif_tx_lock_bh(dev);
4304 netif_addr_lock(dev);
4305 /* with plain spinlock lockdep complains */
4306 spin_lock_irqsave(&np->lock, flags);
4309 * this can take some time, and interrupts stay disabled because
4310 * of spin_lock_irqsave; let's hope no daemon changes the settings
4311 * very often. The worst case is roughly
4313 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4314 * plus some minor delays, i.e. up to about a second
4317 spin_unlock_irqrestore(&np->lock, flags);
4318 netif_addr_unlock(dev);
4319 netif_tx_unlock_bh(dev);
4322 if (ecmd->autoneg == AUTONEG_ENABLE) {
4327 /* advertise only what has been requested */
4328 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4329 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4330 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4331 adv |= ADVERTISE_10HALF;
4332 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4333 adv |= ADVERTISE_10FULL;
4334 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4335 adv |= ADVERTISE_100HALF;
4336 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4337 adv |= ADVERTISE_100FULL;
4338 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4339 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4340 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4341 adv |= ADVERTISE_PAUSE_ASYM;
4342 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4344 if (np->gigabit == PHY_GIGABIT) {
4345 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4346 adv &= ~ADVERTISE_1000FULL;
4347 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4348 adv |= ADVERTISE_1000FULL;
4349 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4352 if (netif_running(dev))
4353 printk(KERN_INFO "%s: link down.\n", dev->name);
4354 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4355 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4356 bmcr |= BMCR_ANENABLE;
4357 /* reset the phy in order for settings to stick,
4358 * and cause autoneg to start */
4359 if (phy_reset(dev, bmcr)) {
4360 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4364 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4365 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4372 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4373 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4374 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4375 adv |= ADVERTISE_10HALF;
4376 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4377 adv |= ADVERTISE_10FULL;
4378 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4379 adv |= ADVERTISE_100HALF;
4380 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4381 adv |= ADVERTISE_100FULL;
4382 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4383 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4384 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4385 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4387 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4388 adv |= ADVERTISE_PAUSE_ASYM;
4389 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4391 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4392 np->fixed_mode = adv;
4394 if (np->gigabit == PHY_GIGABIT) {
4395 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4396 adv &= ~ADVERTISE_1000FULL;
4397 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4400 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4401 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4402 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4403 bmcr |= BMCR_FULLDPLX;
4404 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4405 bmcr |= BMCR_SPEED100;
4406 if (np->phy_oui == PHY_OUI_MARVELL) {
4407 /* reset the phy in order for forced mode settings to stick */
4408 if (phy_reset(dev, bmcr)) {
4409 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4413 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4414 if (netif_running(dev)) {
4415 /* Wait a bit and then reconfigure the nic. */
4422 if (netif_running(dev)) {
4430 #define FORCEDETH_REGS_VER 1
4432 static int nv_get_regs_len(struct net_device *dev)
4434 struct fe_priv *np = netdev_priv(dev);
4435 return np->register_size;
4438 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4440 struct fe_priv *np = netdev_priv(dev);
4441 u8 __iomem *base = get_hwbase(dev);
4445 regs->version = FORCEDETH_REGS_VER;
4446 spin_lock_irq(&np->lock);
4447 for (i = 0; i < np->register_size/sizeof(u32); i++)
4448 rbuf[i] = readl(base + i*sizeof(u32));
4449 spin_unlock_irq(&np->lock);
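/* Usage sketch (assuming eth0): "ethtool -d eth0" retrieves this raw
 * register snapshot; np->register_size bytes are returned, tagged
 * with FORCEDETH_REGS_VER so user space can interpret the layout.
 */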
4452 static int nv_nway_reset(struct net_device *dev)
4454 struct fe_priv *np = netdev_priv(dev);
4460 netif_carrier_off(dev);
4461 if (netif_running(dev)) {
4462 nv_disable_irq(dev);
4463 netif_tx_lock_bh(dev);
4464 netif_addr_lock(dev);
4465 spin_lock(&np->lock);
4468 spin_unlock(&np->lock);
4469 netif_addr_unlock(dev);
4470 netif_tx_unlock_bh(dev);
4471 printk(KERN_INFO "%s: link down.\n", dev->name);
4474 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4475 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4476 bmcr |= BMCR_ANENABLE;
4477 /* reset the phy in order for settings to stick*/
4478 if (phy_reset(dev, bmcr)) {
4479 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4483 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4484 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4487 if (netif_running(dev)) {
4499 static int nv_set_tso(struct net_device *dev, u32 value)
4501 struct fe_priv *np = netdev_priv(dev);
4503 if ((np->driver_data & DEV_HAS_CHECKSUM))
4504 return ethtool_op_set_tso(dev, value);
4509 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4511 struct fe_priv *np = netdev_priv(dev);
4513 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4514 ring->rx_mini_max_pending = 0;
4515 ring->rx_jumbo_max_pending = 0;
4516 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4518 ring->rx_pending = np->rx_ring_size;
4519 ring->rx_mini_pending = 0;
4520 ring->rx_jumbo_pending = 0;
4521 ring->tx_pending = np->tx_ring_size;
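/* Usage sketch (assuming eth0):
 *   ethtool -g eth0                  # query ring sizes (this handler)
 *   ethtool -G eth0 rx 512 tx 512    # resize via nv_set_ringparam below
 */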
4524 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4526 struct fe_priv *np = netdev_priv(dev);
4527 u8 __iomem *base = get_hwbase(dev);
4528 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4529 dma_addr_t ring_addr;
4531 if (ring->rx_pending < RX_RING_MIN ||
4532 ring->tx_pending < TX_RING_MIN ||
4533 ring->rx_mini_pending != 0 ||
4534 ring->rx_jumbo_pending != 0 ||
4535 (np->desc_ver == DESC_VER_1 &&
4536 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4537 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4538 (np->desc_ver != DESC_VER_1 &&
4539 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4540 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4544 /* allocate new rings */
4545 if (!nv_optimized(np)) {
4546 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4547 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4550 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4551 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4554 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4555 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4556 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4557 /* fall back to old rings */
4558 if (!nv_optimized(np)) {
4560 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4561 rxtx_ring, ring_addr);
4564 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4565 rxtx_ring, ring_addr);
4574 if (netif_running(dev)) {
4575 nv_disable_irq(dev);
4576 netif_tx_lock_bh(dev);
4577 netif_addr_lock(dev);
4578 spin_lock(&np->lock);
4588 /* set new values */
4589 np->rx_ring_size = ring->rx_pending;
4590 np->tx_ring_size = ring->tx_pending;
4592 if (!nv_optimized(np)) {
4593 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4594 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4596 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4597 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4599 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4600 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4601 np->ring_addr = ring_addr;
4603 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4604 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4606 if (netif_running(dev)) {
4607 /* reinit driver view of the queues */
4609 if (nv_init_ring(dev)) {
4610 if (!np->in_shutdown)
4611 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4614 /* reinit nic view of the queues */
4615 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4616 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4617 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4618 base + NvRegRingSizes);
4620 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4623 /* restart engines */
4625 spin_unlock(&np->lock);
4626 netif_addr_unlock(dev);
4627 netif_tx_unlock_bh(dev);
4635 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4637 struct fe_priv *np = netdev_priv(dev);
4639 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4640 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4641 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
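/* Usage sketch (assuming eth0):
 *   ethtool -a eth0               # query pause settings (this handler)
 *   ethtool -A eth0 rx on tx off  # request rx-only pause, handled below
 */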
4644 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4646 struct fe_priv *np = netdev_priv(dev);
4649 if ((!np->autoneg && np->duplex == 0) ||
4650 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4651 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4655 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4656 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4660 netif_carrier_off(dev);
4661 if (netif_running(dev)) {
4662 nv_disable_irq(dev);
4663 netif_tx_lock_bh(dev);
4664 netif_addr_lock(dev);
4665 spin_lock(&np->lock);
4668 spin_unlock(&np->lock);
4669 netif_addr_unlock(dev);
4670 netif_tx_unlock_bh(dev);
4673 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4674 if (pause->rx_pause)
4675 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4676 if (pause->tx_pause)
4677 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4679 if (np->autoneg && pause->autoneg) {
4680 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4682 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4683 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4684 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4685 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4686 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4687 adv |= ADVERTISE_PAUSE_ASYM;
4688 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4690 if (netif_running(dev))
4691 printk(KERN_INFO "%s: link down.\n", dev->name);
4692 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4693 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4694 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4696 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4697 if (pause->rx_pause)
4698 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4699 if (pause->tx_pause)
4700 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4702 if (!netif_running(dev))
4703 nv_update_linkspeed(dev);
4705 nv_update_pause(dev, np->pause_flags);
4708 if (netif_running(dev)) {
4715 static u32 nv_get_rx_csum(struct net_device *dev)
4717 struct fe_priv *np = netdev_priv(dev);
4718 return (np->rx_csum) != 0;
4721 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4723 struct fe_priv *np = netdev_priv(dev);
4724 u8 __iomem *base = get_hwbase(dev);
4727 if (np->driver_data & DEV_HAS_CHECKSUM) {
4730 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4733 /* vlan is dependent on rx checksum offload */
4734 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4735 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4737 if (netif_running(dev)) {
4738 spin_lock_irq(&np->lock);
4739 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4740 spin_unlock_irq(&np->lock);
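/* Usage sketch (assuming eth0): the rx/tx checksum and sg hooks here
 * back the ethtool offload interface, e.g. "ethtool -k eth0" to query
 * the current offloads and "ethtool -K eth0 rx on" to toggle rx
 * checksumming through nv_set_rx_csum above.
 */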
4749 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4751 struct fe_priv *np = netdev_priv(dev);
4753 if (np->driver_data & DEV_HAS_CHECKSUM)
4754 return ethtool_op_set_tx_hw_csum(dev, data);
4759 static int nv_set_sg(struct net_device *dev, u32 data)
4761 struct fe_priv *np = netdev_priv(dev);
4763 if (np->driver_data & DEV_HAS_CHECKSUM)
4764 return ethtool_op_set_sg(dev, data);
4769 static int nv_get_sset_count(struct net_device *dev, int sset)
4771 struct fe_priv *np = netdev_priv(dev);
4775 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4776 return NV_TEST_COUNT_EXTENDED;
4778 return NV_TEST_COUNT_BASE;
4780 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4781 return NV_DEV_STATISTICS_V1_COUNT;
4782 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4783 return NV_DEV_STATISTICS_V2_COUNT;
4784 else if (np->driver_data & DEV_HAS_STATISTICS_V3)
4785 return NV_DEV_STATISTICS_V3_COUNT;
4793 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4795 struct fe_priv *np = netdev_priv(dev);
4798 nv_do_stats_poll((unsigned long)dev);
4800 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
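/* Usage sketch (assuming eth0): "ethtool -S eth0" lands here; the
 * hardware counters are refreshed via nv_do_stats_poll() immediately
 * before the copy, so user space sees up-to-date values.
 */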
4803 static int nv_link_test(struct net_device *dev)
4805 struct fe_priv *np = netdev_priv(dev);
4808 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4809 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4811 /* check phy link status */
4812 if (!(mii_status & BMSR_LSTATUS))
4818 static int nv_register_test(struct net_device *dev)
4820 u8 __iomem *base = get_hwbase(dev);
4822 u32 orig_read, new_read;
4825 orig_read = readl(base + nv_registers_test[i].reg);
4827 /* XOR with the mask to toggle the bits under test */
4828 orig_read ^= nv_registers_test[i].mask;
4830 writel(orig_read, base + nv_registers_test[i].reg);
4832 new_read = readl(base + nv_registers_test[i].reg);
4834 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4837 /* restore original value */
4838 orig_read ^= nv_registers_test[i].mask;
4839 writel(orig_read, base + nv_registers_test[i].reg);
4841 } while (nv_registers_test[++i].reg != 0);
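/* Worked example (illustrative only, using a hypothetical table entry
 * { .reg = NvRegMisc1, .mask = 0x03 }): the loop reads the register,
 * XORs 0x03 to flip both maskable bits, writes the result back, and
 * fails unless the flipped bits read back as written; a second XOR
 * and write then restore the original value.
 */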
4846 static int nv_interrupt_test(struct net_device *dev)
4848 struct fe_priv *np = netdev_priv(dev);
4849 u8 __iomem *base = get_hwbase(dev);
4852 u32 save_msi_flags, save_poll_interval = 0;
4854 if (netif_running(dev)) {
4855 /* free current irq */
4857 save_poll_interval = readl(base+NvRegPollingInterval);
4860 /* flag to test interrupt handler */
4863 /* setup test irq */
4864 save_msi_flags = np->msi_flags;
4865 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4866 np->msi_flags |= 0x001; /* setup 1 vector */
4867 if (nv_request_irq(dev, 1))
4870 /* setup timer interrupt */
4871 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4872 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4874 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4876 /* wait for at least one interrupt */
4879 spin_lock_irq(&np->lock);
4881 /* flag should be set within ISR */
4882 testcnt = np->intr_test;
4886 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4887 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4888 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4890 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4892 spin_unlock_irq(&np->lock);
4896 np->msi_flags = save_msi_flags;
4898 if (netif_running(dev)) {
4899 writel(save_poll_interval, base + NvRegPollingInterval);
4900 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4901 /* restore original irq */
4902 if (nv_request_irq(dev, 0))
4909 static int nv_loopback_test(struct net_device *dev)
4911 struct fe_priv *np = netdev_priv(dev);
4912 u8 __iomem *base = get_hwbase(dev);
4913 struct sk_buff *tx_skb, *rx_skb;
4914 dma_addr_t test_dma_addr;
4915 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4917 int len, i, pkt_len;
4919 u32 filter_flags = 0;
4920 u32 misc1_flags = 0;
4923 if (netif_running(dev)) {
4924 nv_disable_irq(dev);
4925 filter_flags = readl(base + NvRegPacketFilterFlags);
4926 misc1_flags = readl(base + NvRegMisc1);
4931 /* reinit driver view of the rx queue */
4935 /* setup hardware for loopback */
4936 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4937 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4939 /* reinit nic view of the rx queue */
4940 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4941 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4942 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4943 base + NvRegRingSizes);
4946 /* restart rx engine */
4949 /* setup packet for tx */
4950 pkt_len = ETH_DATA_LEN;
4951 tx_skb = dev_alloc_skb(pkt_len);
4953 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4954 " of %s\n", dev->name);
4958 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4959 skb_tailroom(tx_skb),
4960 PCI_DMA_FROMDEVICE);
4961 pkt_data = skb_put(tx_skb, pkt_len);
4962 for (i = 0; i < pkt_len; i++)
4963 pkt_data[i] = (u8)(i & 0xff);
4965 if (!nv_optimized(np)) {
4966 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4967 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4969 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4970 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4971 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4973 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4974 pci_push(get_hwbase(dev));
4978 /* check for rx of the packet */
4979 if (!nv_optimized(np)) {
4980 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4981 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4984 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4985 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4988 if (flags & NV_RX_AVAIL) {
4990 } else if (np->desc_ver == DESC_VER_1) {
4991 if (flags & NV_RX_ERROR)
4994 if (flags & NV_RX2_ERROR) {
5000 if (len != pkt_len) {
5002 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
5003 dev->name, len, pkt_len);
5005 rx_skb = np->rx_skb[0].skb;
5006 for (i = 0; i < pkt_len; i++) {
5007 if (rx_skb->data[i] != (u8)(i & 0xff)) {
5009 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
5016 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5019 pci_unmap_page(np->pci_dev, test_dma_addr,
5020 (skb_end_pointer(tx_skb) - tx_skb->data),
5021 PCI_DMA_TODEVICE);
5022 dev_kfree_skb_any(tx_skb);
5027 /* drain rx queue */
5030 if (netif_running(dev)) {
5031 writel(misc1_flags, base + NvRegMisc1);
5032 writel(filter_flags, base + NvRegPacketFilterFlags);
5039 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5041 struct fe_priv *np = netdev_priv(dev);
5042 u8 __iomem *base = get_hwbase(dev);
5044 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
5046 if (!nv_link_test(dev)) {
5047 test->flags |= ETH_TEST_FL_FAILED;
5051 if (test->flags & ETH_TEST_FL_OFFLINE) {
5052 if (netif_running(dev)) {
5053 netif_stop_queue(dev);
5054 #ifdef CONFIG_FORCEDETH_NAPI
5055 napi_disable(&np->napi);
5057 netif_tx_lock_bh(dev);
5058 netif_addr_lock(dev);
5059 spin_lock_irq(&np->lock);
5060 nv_disable_hw_interrupts(dev, np->irqmask);
5061 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5062 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5064 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5069 /* drain rx queue */
5071 spin_unlock_irq(&np->lock);
5072 netif_addr_unlock(dev);
5073 netif_tx_unlock_bh(dev);
5076 if (!nv_register_test(dev)) {
5077 test->flags |= ETH_TEST_FL_FAILED;
5081 result = nv_interrupt_test(dev);
5083 test->flags |= ETH_TEST_FL_FAILED;
5091 if (!nv_loopback_test(dev)) {
5092 test->flags |= ETH_TEST_FL_FAILED;
5096 if (netif_running(dev)) {
5097 /* reinit driver view of the rx queue */
5099 if (nv_init_ring(dev)) {
5100 if (!np->in_shutdown)
5101 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5103 /* reinit nic view of the rx queue */
5104 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5105 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5106 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5107 base + NvRegRingSizes);
5109 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5111 /* restart rx engine */
5113 netif_start_queue(dev);
5114 #ifdef CONFIG_FORCEDETH_NAPI
5115 napi_enable(&np->napi);
5117 nv_enable_hw_interrupts(dev, np->irqmask);
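/* Usage sketch (assuming eth0): "ethtool -t eth0 offline" exercises
 * this path.  The link test always runs; the register, interrupt and
 * loopback tests run only when ETH_TEST_FL_OFFLINE is set, with the
 * interface quiesced beforehand and restored afterwards as coded above.
 */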
5122 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5124 switch (stringset) {
5126 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5129 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5134 static const struct ethtool_ops ops = {
5135 .get_drvinfo = nv_get_drvinfo,
5136 .get_link = ethtool_op_get_link,
5137 .get_wol = nv_get_wol,
5138 .set_wol = nv_set_wol,
5139 .get_settings = nv_get_settings,
5140 .set_settings = nv_set_settings,
5141 .get_regs_len = nv_get_regs_len,
5142 .get_regs = nv_get_regs,
5143 .nway_reset = nv_nway_reset,
5144 .set_tso = nv_set_tso,
5145 .get_ringparam = nv_get_ringparam,
5146 .set_ringparam = nv_set_ringparam,
5147 .get_pauseparam = nv_get_pauseparam,
5148 .set_pauseparam = nv_set_pauseparam,
5149 .get_rx_csum = nv_get_rx_csum,
5150 .set_rx_csum = nv_set_rx_csum,
5151 .set_tx_csum = nv_set_tx_csum,
5152 .set_sg = nv_set_sg,
5153 .get_strings = nv_get_strings,
5154 .get_ethtool_stats = nv_get_ethtool_stats,
5155 .get_sset_count = nv_get_sset_count,
5156 .self_test = nv_self_test,
5159 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5161 struct fe_priv *np = get_nvpriv(dev);
5163 spin_lock_irq(&np->lock);
5165 /* save vlan group */
5169 /* enable vlan on MAC */
5170 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5172 /* disable vlan on MAC */
5173 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5174 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5177 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5179 spin_unlock_irq(&np->lock);
5182 /* The mgmt unit and driver use a semaphore to access the phy during init */
5183 static int nv_mgmt_acquire_sema(struct net_device *dev)
5185 u8 __iomem *base = get_hwbase(dev);
5187 u32 tx_ctrl, mgmt_sema;
5189 for (i = 0; i < 10; i++) {
5190 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5191 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5196 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5199 for (i = 0; i < 2; i++) {
5200 tx_ctrl = readl(base + NvRegTransmitterControl);
5201 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5202 writel(tx_ctrl, base + NvRegTransmitterControl);
5204 /* verify that semaphore was acquired */
5205 tx_ctrl = readl(base + NvRegTransmitterControl);
5206 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5207 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
5216 static int nv_open(struct net_device *dev)
5218 struct fe_priv *np = netdev_priv(dev);
5219 u8 __iomem *base = get_hwbase(dev);
5224 dprintk(KERN_DEBUG "nv_open: begin\n");
5227 mii_rw(dev, np->phyaddr, MII_BMCR,
5228 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5230 /* erase previous misconfiguration */
5231 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5233 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5234 writel(0, base + NvRegMulticastAddrB);
5235 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5236 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5237 writel(0, base + NvRegPacketFilterFlags);
5239 writel(0, base + NvRegTransmitterControl);
5240 writel(0, base + NvRegReceiverControl);
5242 writel(0, base + NvRegAdapterControl);
5244 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5245 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5247 /* initialize descriptor rings */
5249 oom = nv_init_ring(dev);
5251 writel(0, base + NvRegLinkSpeed);
5252 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5254 writel(0, base + NvRegUnknownSetupReg6);
5256 np->in_shutdown = 0;
5259 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5260 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5261 base + NvRegRingSizes);
5263 writel(np->linkspeed, base + NvRegLinkSpeed);
5264 if (np->desc_ver == DESC_VER_1)
5265 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5267 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5268 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5269 writel(np->vlanctl_bits, base + NvRegVlanControl);
5271 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5272 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5273 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5274 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5276 writel(0, base + NvRegMIIMask);
5277 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5278 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5280 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5281 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5282 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5283 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5285 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5287 get_random_bytes(&low, sizeof(low));
5288 low &= NVREG_SLOTTIME_MASK;
5289 if (np->desc_ver == DESC_VER_1) {
5290 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5292 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5293 /* setup legacy backoff */
5294 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5296 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5297 nv_gear_backoff_reseed(dev);
5300 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5301 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5302 if (poll_interval == -1) {
5303 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5304 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5306 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5309 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
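/* Note (added for clarity): optimization_mode and poll_interval are
 * module parameters, so a sketch like
 *   modprobe forcedeth poll_interval=97
 * overrides the timer interval, while the default poll_interval of -1
 * picks the per-optimization-mode value written above.
 */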
5310 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5311 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5312 base + NvRegAdapterControl);
5313 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5314 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5316 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5318 i = readl(base + NvRegPowerState);
5319 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5320 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5324 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5326 nv_disable_hw_interrupts(dev, np->irqmask);
5328 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5329 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5332 if (nv_request_irq(dev, 0)) {
5336 /* ask for interrupts */
5337 nv_enable_hw_interrupts(dev, np->irqmask);
5339 spin_lock_irq(&np->lock);
5340 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5341 writel(0, base + NvRegMulticastAddrB);
5342 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5343 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5344 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5345 /* One manual link speed update: Interrupts are enabled, future link
5346 * speed changes cause interrupts and are handled by nv_link_irq().
5350 miistat = readl(base + NvRegMIIStatus);
5351 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5352 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5354 /* set linkspeed to an invalid value, thus forcing nv_update_linkspeed
5355 * to init hw */
5357 ret = nv_update_linkspeed(dev);
5359 netif_start_queue(dev);
5360 #ifdef CONFIG_FORCEDETH_NAPI
5361 napi_enable(&np->napi);
5365 netif_carrier_on(dev);
5367 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
5368 netif_carrier_off(dev);
5371 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5373 /* start statistics timer */
5374 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5375 mod_timer(&np->stats_poll,
5376 round_jiffies(jiffies + STATS_INTERVAL));
5378 spin_unlock_irq(&np->lock);
5386 static int nv_close(struct net_device *dev)
5388 struct fe_priv *np = netdev_priv(dev);
5391 spin_lock_irq(&np->lock);
5392 np->in_shutdown = 1;
5393 spin_unlock_irq(&np->lock);
5394 #ifdef CONFIG_FORCEDETH_NAPI
5395 napi_disable(&np->napi);
5397 synchronize_irq(np->pci_dev->irq);
5399 del_timer_sync(&np->oom_kick);
5400 del_timer_sync(&np->nic_poll);
5401 del_timer_sync(&np->stats_poll);
5403 netif_stop_queue(dev);
5404 spin_lock_irq(&np->lock);
5408 /* disable interrupts on the nic or we will lock up */
5409 base = get_hwbase(dev);
5410 nv_disable_hw_interrupts(dev, np->irqmask);
5412 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5414 spin_unlock_irq(&np->lock);
5420 if (np->wolenabled) {
5421 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5424 /* power down phy */
5425 mii_rw(dev, np->phyaddr, MII_BMCR,
5426 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5429 /* FIXME: power down nic */
5434 static const struct net_device_ops nv_netdev_ops = {
5435 .ndo_open = nv_open,
5436 .ndo_stop = nv_close,
5437 .ndo_get_stats = nv_get_stats,
5438 .ndo_start_xmit = nv_start_xmit,
5439 .ndo_tx_timeout = nv_tx_timeout,
5440 .ndo_change_mtu = nv_change_mtu,
5441 .ndo_validate_addr = eth_validate_addr,
5442 .ndo_set_mac_address = nv_set_mac_address,
5443 .ndo_set_multicast_list = nv_set_multicast,
5444 .ndo_vlan_rx_register = nv_vlan_rx_register,
5445 #ifdef CONFIG_NET_POLL_CONTROLLER
5446 .ndo_poll_controller = nv_poll_controller,
5450 static const struct net_device_ops nv_netdev_ops_optimized = {
5451 .ndo_open = nv_open,
5452 .ndo_stop = nv_close,
5453 .ndo_get_stats = nv_get_stats,
5454 .ndo_start_xmit = nv_start_xmit_optimized,
5455 .ndo_tx_timeout = nv_tx_timeout,
5456 .ndo_change_mtu = nv_change_mtu,
5457 .ndo_validate_addr = eth_validate_addr,
5458 .ndo_set_mac_address = nv_set_mac_address,
5459 .ndo_set_multicast_list = nv_set_multicast,
5460 .ndo_vlan_rx_register = nv_vlan_rx_register,
5461 #ifdef CONFIG_NET_POLL_CONTROLLER
5462 .ndo_poll_controller = nv_poll_controller,
5466 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5468 struct net_device *dev;
5473 u32 powerstate, txreg;
5474 u32 phystate_orig = 0, phystate;
5475 int phyinitialized = 0;
5476 static int printed_version;
5478 if (!printed_version++)
5479 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5480 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
5482 dev = alloc_etherdev(sizeof(struct fe_priv));
5487 np = netdev_priv(dev);
5489 np->pci_dev = pci_dev;
5490 spin_lock_init(&np->lock);
5491 SET_NETDEV_DEV(dev, &pci_dev->dev);
5493 init_timer(&np->oom_kick);
5494 np->oom_kick.data = (unsigned long) dev;
5495 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
5496 init_timer(&np->nic_poll);
5497 np->nic_poll.data = (unsigned long) dev;
5498 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
5499 init_timer(&np->stats_poll);
5500 np->stats_poll.data = (unsigned long) dev;
5501 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
5503 err = pci_enable_device(pci_dev);
5507 pci_set_master(pci_dev);
5509 err = pci_request_regions(pci_dev, DRV_NAME);
5513 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5514 np->register_size = NV_PCI_REGSZ_VER3;
5515 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5516 np->register_size = NV_PCI_REGSZ_VER2;
5518 np->register_size = NV_PCI_REGSZ_VER1;
5522 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5523 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5524 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5525 pci_resource_len(pci_dev, i),
5526 pci_resource_flags(pci_dev, i));
5527 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5528 pci_resource_len(pci_dev, i) >= np->register_size) {
5529 addr = pci_resource_start(pci_dev, i);
5533 if (i == DEVICE_COUNT_RESOURCE) {
5534 dev_printk(KERN_INFO, &pci_dev->dev,
5535 "Couldn't find register window\n");
5539 /* copy of driver data */
5540 np->driver_data = id->driver_data;
5541 /* copy of device id */
5542 np->device_id = id->device;
5544 /* handle different descriptor versions */
5545 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5546 /* packet format 3: supports 40-bit addressing */
5547 np->desc_ver = DESC_VER_3;
5548 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5550 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
5551 dev_printk(KERN_INFO, &pci_dev->dev,
5552 "64-bit DMA failed, using 32-bit addressing\n");
5554 dev->features |= NETIF_F_HIGHDMA;
5555 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5556 dev_printk(KERN_INFO, &pci_dev->dev,
5557 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5560 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5561 /* packet format 2: supports jumbo frames */
5562 np->desc_ver = DESC_VER_2;
5563 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5565 /* original packet format */
5566 np->desc_ver = DESC_VER_1;
5567 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5570 np->pkt_limit = NV_PKTLIMIT_1;
5571 if (id->driver_data & DEV_HAS_LARGEDESC)
5572 np->pkt_limit = NV_PKTLIMIT_2;
5574 if (id->driver_data & DEV_HAS_CHECKSUM) {
5576 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5577 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5578 dev->features |= NETIF_F_TSO;
5581 np->vlanctl_bits = 0;
5582 if (id->driver_data & DEV_HAS_VLAN) {
5583 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5584 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5588 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5589 np->msi_flags |= NV_MSI_CAPABLE;
5591 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5592 np->msi_flags |= NV_MSI_X_CAPABLE;
5595 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5596 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5597 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5598 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5599 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5604 np->base = ioremap(addr, np->register_size);
5607 dev->base_addr = (unsigned long)np->base;
5609 dev->irq = pci_dev->irq;
5611 np->rx_ring_size = RX_RING_DEFAULT;
5612 np->tx_ring_size = TX_RING_DEFAULT;
5614 if (!nv_optimized(np)) {
5615 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5616 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5618 if (!np->rx_ring.orig)
5620 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5622 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5623 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5625 if (!np->rx_ring.ex)
5627 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5629 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5630 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5631 if (!np->rx_skb || !np->tx_skb)
5634 if (!nv_optimized(np))
5635 dev->netdev_ops = &nv_netdev_ops;
5637 dev->netdev_ops = &nv_netdev_ops_optimized;
5639 #ifdef CONFIG_FORCEDETH_NAPI
5640 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5642 SET_ETHTOOL_OPS(dev, &ops);
5643 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5645 pci_set_drvdata(pci_dev, dev);
5647 /* read the mac address */
5648 base = get_hwbase(dev);
5649 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5650 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5652 /* check the workaround bit for correct mac address order */
5653 txreg = readl(base + NvRegTransmitPoll);
5654 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5655 /* mac address is already in correct order */
5656 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5657 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5658 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5659 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5660 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5661 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5662 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5663 /* mac address is already in correct order */
5664 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5665 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5666 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5667 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5668 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5669 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5671 * Store orig_mac in the reversed (hardware) byte order:
5672 * the MAC_ADDR_REV flag is cleared during low power transitions,
5673 * so restore paths must always put back the reversed address.
5675 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5676 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5677 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5679 /* need to reverse mac address to correct order */
5680 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5681 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5682 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5683 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5684 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5685 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5686 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5687 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
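/* Worked example (illustrative only): with np->orig_mac[0] ==
 * 0x44332211 and np->orig_mac[1] == 0x00006655, the reversed branch
 * above yields the address 66:55:44:33:22:11; the workaround bit in
 * NvRegTransmitPoll records that the address has been corrected so
 * later probes do not reverse it again.
 */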
5689 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5691 if (!is_valid_ether_addr(dev->perm_addr)) {
5693 * Bad mac address. At least one BIOS sets the mac address
5694 * to 01:23:45:67:89:ab
5696 dev_printk(KERN_ERR, &pci_dev->dev,
5697 "Invalid Mac address detected: %pM\n",
5699 dev_printk(KERN_ERR, &pci_dev->dev,
5700 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5701 dev->dev_addr[0] = 0x00;
5702 dev->dev_addr[1] = 0x00;
5703 dev->dev_addr[2] = 0x6c;
5704 get_random_bytes(&dev->dev_addr[3], 3);
5707 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5708 pci_name(pci_dev), dev->dev_addr);
5710 /* set mac address */
5711 nv_copy_mac_to_hw(dev);
5713 /* Workaround current PCI init glitch: wakeup bits aren't
5714 * being set from PCI PM capability.
5716 device_init_wakeup(&pci_dev->dev, 1);
5719 writel(0, base + NvRegWakeUpFlags);
5722 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5724 /* take phy and nic out of low power mode */
5725 powerstate = readl(base + NvRegPowerState2);
5726 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5727 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5728 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5729 pci_dev->revision >= 0xA3)
5730 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5731 writel(powerstate, base + NvRegPowerState2);
5734 if (np->desc_ver == DESC_VER_1) {
5735 np->tx_flags = NV_TX_VALID;
5737 np->tx_flags = NV_TX2_VALID;
5739 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5740 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5741 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5742 np->msi_flags |= 0x0003;
5744 np->irqmask = NVREG_IRQMASK_CPU;
5745 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5746 np->msi_flags |= 0x0001;
5749 if (id->driver_data & DEV_NEED_TIMERIRQ)
5750 np->irqmask |= NVREG_IRQ_TIMER;
5751 if (id->driver_data & DEV_NEED_LINKTIMER) {
5752 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5753 np->need_linktimer = 1;
5754 np->link_timeout = jiffies + LINK_TIMEOUT;
5756 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5757 np->need_linktimer = 0;
5760 /* Limit the number of outstanding tx descriptors to work around a hw bug */
5761 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5763 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
5764 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
5765 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
5766 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
5767 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
5768 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
5769 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
5770 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
5771 pci_dev->revision >= 0xA2)
5775 /* clear phy state and temporarily halt phy interrupts */
5776 writel(0, base + NvRegMIIMask);
5777 phystate = readl(base + NvRegAdapterControl);
5778 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5780 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5781 writel(phystate, base + NvRegAdapterControl);
5783 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5785 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5786 /* management unit running on the mac? */
5787 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5788 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5789 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5790 if (nv_mgmt_acquire_sema(dev)) {
5791 /* management unit setup the phy already? */
5792 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5793 NVREG_XMITCTL_SYNC_PHY_INIT) {
5794 /* phy is inited by mgmt unit */
5796 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5798 /* we need to init the phy */
5804 /* find a suitable phy */
5805 for (i = 1; i <= 32; i++) {
5807 int phyaddr = i & 0x1F;
5809 spin_lock_irq(&np->lock);
5810 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5811 spin_unlock_irq(&np->lock);
5812 if (id1 < 0 || id1 == 0xffff)
5814 spin_lock_irq(&np->lock);
5815 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5816 spin_unlock_irq(&np->lock);
5817 if (id2 < 0 || id2 == 0xffff)
5820 np->phy_model = id2 & PHYID2_MODEL_MASK;
5821 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5822 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5823 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5824 pci_name(pci_dev), id1, id2, phyaddr);
5825 np->phyaddr = phyaddr;
5826 np->phy_oui = id1 | id2;
5828 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5829 if (np->phy_oui == PHY_OUI_REALTEK2)
5830 np->phy_oui = PHY_OUI_REALTEK;
5831 /* Setup phy revision for Realtek */
5832 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5833 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5838 dev_printk(KERN_INFO, &pci_dev->dev,
5839 "open: Could not find a valid PHY.\n");
5843 if (!phyinitialized) {
5847 /* see if it is a gigabit phy */
5848 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5849 if (mii_status & PHY_GIGABIT) {
5850 np->gigabit = PHY_GIGABIT;
5854 /* set default link speed settings */
5855 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5859 err = register_netdev(dev);
5861 dev_printk(KERN_INFO, &pci_dev->dev,
5862 "unable to register netdev: %d\n", err);
5866 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
5867 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
5878 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5879 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5880 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5882 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5884 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5885 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5886 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5887 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5888 np->need_linktimer ? "lnktim " : "",
5889 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5890 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5897 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5898 pci_set_drvdata(pci_dev, NULL);
5902 iounmap(get_hwbase(dev));
5904 pci_release_regions(pci_dev);
5906 pci_disable_device(pci_dev);
5913 static void nv_restore_phy(struct net_device *dev)
5915 struct fe_priv *np = netdev_priv(dev);
5916 u16 phy_reserved, mii_control;
5918 if (np->phy_oui == PHY_OUI_REALTEK &&
5919 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5920 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5921 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5922 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5923 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5924 phy_reserved |= PHY_REALTEK_INIT8;
5925 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5926 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5928 /* restart auto negotiation */
5929 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5930 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5931 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5935 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5937 struct net_device *dev = pci_get_drvdata(pci_dev);
5938 struct fe_priv *np = netdev_priv(dev);
5939 u8 __iomem *base = get_hwbase(dev);
5941 /* special op: write back the misordered MAC address - otherwise
5942 * the next nv_probe would see a wrong address.
5944 writel(np->orig_mac[0], base + NvRegMacAddrA);
5945 writel(np->orig_mac[1], base + NvRegMacAddrB);
5946 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5947 base + NvRegTransmitPoll);
5950 static void __devexit nv_remove(struct pci_dev *pci_dev)
5952 struct net_device *dev = pci_get_drvdata(pci_dev);
5954 unregister_netdev(dev);
5956 nv_restore_mac_addr(pci_dev);
5958 /* restore any phy related changes */
5959 nv_restore_phy(dev);
5961 /* free all structures */
5963 iounmap(get_hwbase(dev));
5964 pci_release_regions(pci_dev);
5965 pci_disable_device(pci_dev);
5967 pci_set_drvdata(pci_dev, NULL);
5971 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5973 struct net_device *dev = pci_get_drvdata(pdev);
5974 struct fe_priv *np = netdev_priv(dev);
5975 u8 __iomem *base = get_hwbase(dev);
5978 if (netif_running(dev)) {
5982 netif_device_detach(dev);
5984 /* save non-pci configuration space */
5985 for (i = 0; i < np->register_size/sizeof(u32); i++)
5986 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5988 pci_save_state(pdev);
5989 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5990 pci_disable_device(pdev);
5991 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5995 static int nv_resume(struct pci_dev *pdev)
5997 struct net_device *dev = pci_get_drvdata(pdev);
5998 struct fe_priv *np = netdev_priv(dev);
5999 u8 __iomem *base = get_hwbase(dev);
6002 pci_set_power_state(pdev, PCI_D0);
6003 pci_restore_state(pdev);
6004 /* ack any pending wake events, disable PME */
6005 pci_enable_wake(pdev, PCI_D0, 0);
6007 /* restore non-pci configuration space */
6008 for (i = 0; i < np->register_size/sizeof(u32); i++)
6009 writel(np->saved_config_space[i], base+i*sizeof(u32));
6011 netif_device_attach(dev);
6012 if (netif_running(dev)) {
6014 nv_set_multicast(dev);
6019 static void nv_shutdown(struct pci_dev *pdev)
6021 struct net_device *dev = pci_get_drvdata(pdev);
6022 struct fe_priv *np = netdev_priv(dev);
6024 if (netif_running(dev))
6027 nv_restore_mac_addr(pdev);
6029 pci_disable_device(pdev);
6030 if (system_state == SYSTEM_POWER_OFF) {
6031 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
6032 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
6033 pci_set_power_state(pdev, PCI_D3hot);
6037 #define nv_suspend NULL
6038 #define nv_shutdown NULL
6039 #define nv_resume NULL
6040 #endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};
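/*
 * Illustrative sketch of how the driver_data feature bits in pci_tbl
 * are consumed at probe time; this helper is hypothetical and not part
 * of the driver, which tests the bits inline in nv_probe.
 */
static inline int nv_example_has_feature(const struct pci_device_id *id,
					 unsigned long feature)
{
	/* e.g. nv_example_has_feature(id, DEV_HAS_HIGH_DMA) */
	return (id->driver_data & feature) != 0;
}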
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval at which the timer interrupt is generated, computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and Max is 65535.");
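/*
 * Worked example for the poll_interval formula above (illustrative):
 * the ~10 ms timer period mentioned in the header comment (100
 * interrupts per second) corresponds to poll_interval =
 * (10000 * 100) / (2^10) = 976, using integer division.
 */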
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);