/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.3"
#define DRV_RELDATE	"2007-03-06"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(CONFIG_SPARC) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static int avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
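
/* For reference, rhine_set_rx_mode() below derives each hash bit from
 * the top six bits of the address CRC:
 *	bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */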

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE	64
#else
#define RX_RING_SIZE	16
#endif

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

/*
Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III chips.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations, the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32-bit boundary, so
the driver must often copy transmit packets into bounce buffers.
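
In outline (see rhine_start_tx() below; the real test also covers
fragmented skbs and checksum offload, and only Rhine-I needs it):

	if ((rp->quirks & rqRhineI) && ((unsigned long)skb->data & 3))
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);	/* bounce */
	else
		map skb->data directly with pci_map_single();	/* zero-copy */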

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than rx_copybreak bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The rx_copybreak value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.
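
In outline, rhine_rx() below decides per frame roughly as follows (a
sketch, not the verbatim code):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL)
		copy the frame into the fresh skb;	/* ring buffer is reused */
	else
		pass the ring skb up and refill that slot later;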

Since the VIA chips are only able to transfer data to buffers on 32-bit
boundaries, the IP header at offset 14 in an Ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
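
Concretely, the wake-up test in rhine_tx() below is:

	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);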

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/

/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */
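
/*
 * A rough (not exhaustive) map of where the quirk matters below:
 * rhine_start_tx() bounces unaligned buffers through tx_buf[], and
 * rhine_tx() reads the collision count from bits 3-6 of tx_status
 * rather than bits 0-3.
 */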

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
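/* Reading any chip register flushes preceding posted writes to the chip;
 * rhine_chip_reset(), for one, issues IOSYNC right after writing Cmd1Reset
 * so that the write reaches the chip before ChipCmd1 is polled. */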

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers whose MMIO values we verify against their PIO counterparts. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown(struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024 - i, __func__, __LINE__);		\
} while (0)

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
#endif

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif
	DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO does not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr,
#else
	       (long)ioaddr,
#endif
	       print_mac(mac, dev->dev_addr), pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
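	/* If every buffer was allocated, i == RX_RING_SIZE here and dirty_rx
	 * ends up 0; otherwise the (unsigned) negative difference tells the
	 * refill loop in rhine_rx() that slots still lack skbuffs. */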
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
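			/* tx_skbuff_dma[i] is left 0 by rhine_start_tx() when
			 * the Rhine-I bounce buffer carried this packet, so
			 * there is nothing to unmap in that case. */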
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let the MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_enable(&rp->napi);
#endif

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
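			/* Mask Rx-related sources; rhine_napipoll() will
			 * restore the full interrupt mask once polling is
			 * done. */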
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			netif_rx_schedule(dev, &rp->napi);
#else
			rhine_rx(dev, RX_RING_SIZE);
#endif
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}
}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
1711
1712static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1713{
1714 struct rhine_private *rp = netdev_priv(dev);
1715
1716 strcpy(info->driver, DRV_NAME);
1717 strcpy(info->version, DRV_VERSION);
1718 strcpy(info->bus_info, pci_name(rp->pdev));
1719}
1720
1721static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1722{
1723 struct rhine_private *rp = netdev_priv(dev);
1724 int rc;
1725
1726 spin_lock_irq(&rp->lock);
1727 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1728 spin_unlock_irq(&rp->lock);
1729
1730 return rc;
1731}
1732
1733static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1734{
1735 struct rhine_private *rp = netdev_priv(dev);
1736 int rc;
1737
1738 spin_lock_irq(&rp->lock);
1739 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1740 spin_unlock_irq(&rp->lock);
00b428c2 1741 rhine_set_carrier(&rp->mii_if);
1da177e4
LT
1742
1743 return rc;
1744}
1745
static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

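/* Wake-on-LAN. Only chips with the rqWOL quirk expose these hooks, and
   the modes reported as supported mirror what rhine_shutdown() can
   actually program into the WOL registers. */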
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

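/* MII ioctls (SIOCGMIIPHY and friends) pass through to the generic MII
   handler; the interface must be up so that PHY state is valid. */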
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

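/* Orderly close: stop the queue and NAPI, quiesce the chip (loopback
   mode, interrupts masked, DMA stopped), then release the IRQ and all
   descriptor/buffer memory. */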
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);
#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

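/* Called at system shutdown/reboot: arm the wake-up sources the user
   selected and, unless avoid_D3 is set for broken BIOSes, put the chip
   into power state D3 so it can later wake the machine. */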
static void rhine_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}

#ifdef CONFIG_PM
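/* Suspend: detach the interface, reuse the shutdown path to arm WOL and
   power the chip down, and release the IRQ until resume. */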
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif
	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

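/* Resume: the chip may have lost all context in D3, so re-request the
   IRQ, rebuild the Rx/Tx rings from scratch and reprogram every
   register before re-attaching the interface. */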
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};

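/* Boards whose BIOS cannot bring the chip back out of power state D3;
   matching any entry here forces avoid_D3 at module load. */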
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = 1;
		printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
		       "enabled.\n", DRV_NAME);
	} else if (avoid_D3)
		printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);

	return pci_register_driver(&rhine_driver);
}

static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}

module_init(rhine_init);
module_exit(rhine_cleanup);