/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
        Written 1998-2001 by Donald Becker.

        Current Maintainer: Roger Luethi <rl@hellgate.ch>

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice. This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is designed for the VIA VT86C100A Rhine-I.
        It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
        and management NIC 6105M).

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403


        This driver contains some changes from the original Donald Becker
        version. He may or may not be interested in bug reports on this
        code. You can find his versions at:
        http://www.scyld.com/network/via-rhine.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "via-rhine"
#define DRV_VERSION     "1.4.3"
#define DRV_RELDATE     "2007-03-06"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
        || defined(CONFIG_SPARC) || defined(__ia64__) \
        || defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static int avoid_D3;
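/* Example usage: "modprobe via-rhine avoid_D3=1" for a modular build, or
 * "via-rhine.avoid_D3=1" on the kernel command line when the driver is
 * built in. */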

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */
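/* For example, to force 100 Mbit/s full duplex with ethtool(8) (eth0 is only
 * a placeholder interface name):
 *
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 *   ethtool -s eth0 autoneg on
 */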

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
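/*
 * Illustrative sketch (not part of the driver, kept out of the build): how
 * rhine_set_rx_mode() below maps a multicast address into the 64-bit hash
 * filter. The top six bits of the Ethernet CRC select one of 64 filter bits.
 */
#if 0
static void hash_example(u32 mc_filter[2], const u8 *dmi_addr)
{
        int bit_nr = ether_crc(ETH_ALEN, dmi_addr) >> 26;       /* 0..63 */

        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
#endif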


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_LEN    10      /* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE    64
#else
#define RX_RING_SIZE    16
#endif


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

/*
                Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
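/*
 * Illustrative sketch (not part of the driver, kept out of the build): the
 * producer/consumer ring arithmetic described in IIIa/IIId above. cur_rx and
 * cur_tx count entries ever produced, dirty_rx and dirty_tx entries ever
 * reclaimed. Because the ring sizes are powers of two, the modulo compiles
 * to a mask, and the difference of the two counters is the fill level even
 * across unsigned wrap-around.
 */
#if 0
static unsigned int ring_slot(unsigned int counter, unsigned int ring_size)
{
        return counter % ring_size;     /* e.g. cur_tx % TX_RING_SIZE */
}

static unsigned int ring_in_flight(unsigned int cur, unsigned int dirty)
{
        return cur - dirty;             /* entries still owned by the chip */
}
#endif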


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
        VT86C100A       = 0x00,
        VTunknown0      = 0x20,
        VT6102          = 0x40,
        VT8231          = 0x50, /* Integrated MAC */
        VT8233          = 0x60, /* Integrated MAC */
        VT8235          = 0x74, /* Integrated MAC */
        VT8237          = 0x78, /* Integrated MAC */
        VTunknown1      = 0x7C,
        VT6105          = 0x80,
        VT6105_B0       = 0x83,
        VT6105L         = 0x8A,
        VT6107          = 0x8C,
        VTunknown2      = 0x8E,
        VT6105M         = 0x90, /* Management adapter */
};

enum rhine_quirks {
        rqWOL           = 0x0001,       /* Wake-On-LAN support */
        rqForceReset    = 0x0002,
        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
        rqRhineI        = 0x0100,       /* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC  do { ioread8(ioaddr + StationAddr); } while (0)

static const struct pci_device_id rhine_pci_tbl[] = {
        { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },    /* VT86C100A */
        { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6102 */
        { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },    /* 6105{,L,LOM} */
        { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6105M */
        { }     /* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
        ChipCmd1=0x09,
        IntrStatus=0x0C, IntrEnable=0x0E,
        MulticastFilter0=0x10, MulticastFilter1=0x14,
        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
        MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
        StickyHW=0x83, IntrStatus2=0x84,
        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
        WOLcrClr1=0xA6, WOLcgClr=0xA7,
        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
        BackOptional=0x01, BackModify=0x02,
        BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers for which we verify that MMIO and PIO reads return the same value. */
static const int mmio_verify_registers[] = {
        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
        0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
        IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
        IntrPCIErr=0x0040,
        IntrStatsMax=0x0080, IntrRxEarly=0x0100,
        IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
        IntrTxAborted=0x2000, IntrLinkChange=0x4000,
        IntrRxWakeUp=0x8000,
        IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
        IntrTxDescRace=0x080000,        /* mapped from IntrStatus2 */
        IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
        WOLucast        = 0x10,
        WOLmagic        = 0x20,
        WOLbmcast       = 0x30,
        WOLlnkon        = 0x40,
        WOLlnkoff       = 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
        s32 rx_status;
        u32 desc_length;        /* Chain flag, Buffer/frame length */
        u32 addr;
        u32 next_desc;
};
struct tx_desc {
        s32 tx_status;
        u32 desc_length;        /* Chain flag, Tx Config, Frame length */
        u32 addr;
        u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC          0x00e08000
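/*
 * Illustrative excerpt (not part of the build): rhine_start_tx() below
 * composes desc_length by OR-ing the TXDESC control bits with the frame
 * length, padded up to the 60-byte Ethernet minimum (ETH_ZLEN), in bits 0-10.
 */
#if 0
        rp->tx_ring[entry].desc_length =
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
#endif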

enum rx_status_bits {
        RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
        DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
        CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
        Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
        Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
        /* Descriptor rings */
        struct rx_desc *rx_ring;
        struct tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        /* The addresses of receive-in-place skbuffs. */
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

        /* The saved address of a sent-in-place packet/buffer, for later free(). */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

        /* Tx bounce buffers (Rhine-I only) */
        unsigned char *tx_buf[TX_RING_SIZE];
        unsigned char *tx_bufs;
        dma_addr_t tx_bufs_dma;

        struct pci_dev *pdev;
        long pioaddr;
        struct net_device *dev;
        struct napi_struct napi;
        struct net_device_stats stats;
        spinlock_t lock;

        /* Frequently used values: keep some adjacent for cache effect. */
        u32 quirks;
        struct rx_desc *rx_head_desc;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        u8 wolopts;

        u8 tx_thresh, rx_thresh;

        struct mii_if_info mii_if;
        void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {                                  \
        int i = 1024;                                                   \
        while (!(condition) && --i)                                     \
                ;                                                       \
        if (debug > 1 && i < 512)                                       \
                printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",      \
                       DRV_NAME, 1024-i, __func__, __LINE__);           \
} while (0)

static inline u32 get_intr_status(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u32 intr_status;

        intr_status = ioread16(ioaddr + IntrStatus);
        /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
        if (rp->quirks & rqStatusWBRace)
                intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
        return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u16 wolstat;

        if (rp->quirks & rqWOL) {
                /* Make sure chip is in power state D0 */
                iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

                /* Disable "force PME-enable" */
                iowrite8(0x80, ioaddr + WOLcgClr);

                /* Clear power-event config bits (WOL) */
                iowrite8(0xFF, ioaddr + WOLcrClr);
                /* More recent cards can manage two additional patterns */
                if (rp->quirks & rq6patterns)
                        iowrite8(0x03, ioaddr + WOLcrClr1);

                /* Save power-event status bits */
                wolstat = ioread8(ioaddr + PwrcsrSet);
                if (rp->quirks & rq6patterns)
                        wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

                /* Clear power-event status bits */
                iowrite8(0xFF, ioaddr + PwrcsrClr);
                if (rp->quirks & rq6patterns)
                        iowrite8(0x03, ioaddr + PwrcsrClr1);

                if (wolstat) {
                        char *reason;
                        switch (wolstat) {
                        case WOLmagic:
                                reason = "Magic packet";
                                break;
                        case WOLlnkon:
                                reason = "Link went up";
                                break;
                        case WOLlnkoff:
                                reason = "Link went down";
                                break;
                        case WOLucast:
                                reason = "Unicast packet";
                                break;
                        case WOLbmcast:
                                reason = "Multicast/broadcast packet";
                                break;
                        default:
                                reason = "Unknown";
                        }
                        printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
                               DRV_NAME, reason);
                }
        }
}

static void rhine_chip_reset(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
        IOSYNC;

        if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
                printk(KERN_INFO "%s: Reset not complete yet. "
                       "Trying harder.\n", DRV_NAME);

                /* Force reset */
                if (rp->quirks & rqForceReset)
                        iowrite8(0x40, ioaddr + MiscCmd);

                /* Reset can take somewhat longer (rare) */
                RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
        }

        if (debug > 1)
                printk(KERN_INFO "%s: Reset %s.\n", dev->name,
                       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
                       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
        int n;
        if (quirks & rqRhineI) {
                /* More recent docs say that this bit is reserved ... */
                n = inb(pioaddr + ConfigA) | 0x20;
                outb(n, pioaddr + ConfigA);
        } else {
                n = inb(pioaddr + ConfigD) | 0x80;
                outb(n, pioaddr + ConfigD);
        }
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        outb(0x20, pioaddr + MACRegEEcsr);
        RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
        /*
         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
         * MMIO. If reloading EEPROM was done first this could be avoided, but
         * it is not known if that still works with the "win98-reboot" problem.
         */
        enable_mmio(pioaddr, rp->quirks);
#endif

        /* Turn off EEPROM-controlled wake-up (magic packet) */
        if (rp->quirks & rqWOL)
                iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
        disable_irq(dev->irq);
        rhine_interrupt(dev->irq, (void *)dev);
        enable_irq(dev->irq);
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
        struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
        struct net_device *dev = rp->dev;
        void __iomem *ioaddr = rp->base;
        int work_done;

        work_done = rhine_rx(dev, budget);

        if (work_done < budget) {
                netif_rx_complete(dev, napi);

                iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
                          IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
                          IntrTxDone | IntrTxError | IntrTxUnderrun |
                          IntrPCIErr | IntrStatsMax | IntrLinkChange,
                          ioaddr + IntrEnable);
        }
        return work_done;
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
        struct rhine_private *rp = netdev_priv(dev);

        /* Reset the chip to erase previous misconfiguration. */
        rhine_chip_reset(dev);

        /* Rhine-I needs extra time to recuperate before EEPROM reload */
        if (rp->quirks & rqRhineI)
                msleep(5);

        /* Reload EEPROM controlled bytes cleared by soft reset */
        rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct rhine_private *rp;
        int i, rc;
        u32 quirks;
        long pioaddr;
        long memaddr;
        void __iomem *ioaddr;
        int io_size, phy_id;
        const char *name;
#ifdef USE_MMIO
        int bar = 1;
#else
        int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        io_size = 256;
        phy_id = 0;
        quirks = 0;
        name = "Rhine";
        if (pdev->revision < VTunknown0) {
                quirks = rqRhineI;
                io_size = 128;
        }
        else if (pdev->revision >= VT6102) {
                quirks = rqWOL | rqForceReset;
                if (pdev->revision < VT6105) {
                        name = "Rhine II";
                        quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
                }
                else {
                        phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
                        if (pdev->revision >= VT6105_B0)
                                quirks |= rq6patterns;
                        if (pdev->revision < VT6105M)
                                name = "Rhine III";
                        else
                                name = "Rhine III (Management Adapter)";
                }
        }

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out;

        /* this should always be supported */
        rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc) {
                printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
                       "the card!?\n");
                goto err_out;
        }

        /* sanity check */
        if ((pci_resource_len(pdev, 0) < io_size) ||
            (pci_resource_len(pdev, 1) < io_size)) {
                rc = -EIO;
                printk(KERN_ERR "Insufficient PCI resources, aborting\n");
                goto err_out;
        }

        pioaddr = pci_resource_start(pdev, 0);
        memaddr = pci_resource_start(pdev, 1);

        pci_set_master(pdev);

        dev = alloc_etherdev(sizeof(struct rhine_private));
        if (!dev) {
                rc = -ENOMEM;
                printk(KERN_ERR "alloc_etherdev failed\n");
                goto err_out;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        rp = netdev_priv(dev);
        rp->dev = dev;
        rp->quirks = quirks;
        rp->pioaddr = pioaddr;
        rp->pdev = pdev;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_free_netdev;

        ioaddr = pci_iomap(pdev, bar, io_size);
        if (!ioaddr) {
                rc = -EIO;
                printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
                       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
                goto err_out_free_res;
        }

#ifdef USE_MMIO
        enable_mmio(pioaddr, quirks);

        /* Check that selected MMIO registers match the PIO ones */
        i = 0;
        while (mmio_verify_registers[i]) {
                int reg = mmio_verify_registers[i++];
                unsigned char a = inb(pioaddr+reg);
                unsigned char b = readb(ioaddr+reg);
                if (a != b) {
                        rc = -EIO;
                        printk(KERN_ERR "MMIO does not match PIO [%02x] "
                               "(%02x != %02x)\n", reg, a, b);
                        goto err_out_unmap;
                }
        }
#endif /* USE_MMIO */

        dev->base_addr = (unsigned long)ioaddr;
        rp->base = ioaddr;

        /* Get chip registers into a sane state */
        rhine_power_init(dev);
        rhine_hw_init(dev, pioaddr);

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        if (!is_valid_ether_addr(dev->perm_addr)) {
                rc = -EIO;
                printk(KERN_ERR "Invalid MAC address\n");
                goto err_out_unmap;
        }

        /* For Rhine-I/II, phy_id is loaded from EEPROM */
        if (!phy_id)
                phy_id = ioread8(ioaddr + 0x6C);

        dev->irq = pdev->irq;

        spin_lock_init(&rp->lock);
        rp->mii_if.dev = dev;
        rp->mii_if.mdio_read = mdio_read;
        rp->mii_if.mdio_write = mdio_write;
        rp->mii_if.phy_id_mask = 0x1f;
        rp->mii_if.reg_num_mask = 0x1f;

        /* The chip-specific entries in the device structure. */
        dev->open = rhine_open;
        dev->hard_start_xmit = rhine_start_tx;
        dev->stop = rhine_close;
        dev->get_stats = rhine_get_stats;
        dev->set_multicast_list = rhine_set_rx_mode;
        dev->do_ioctl = netdev_ioctl;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->tx_timeout = rhine_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif
        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

        /* dev->name not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
                goto err_out_unmap;

        printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
               dev->name, name,
#ifdef USE_MMIO
               memaddr
#else
               (long)ioaddr
#endif
               );

        for (i = 0; i < 5; i++)
                printk("%2.2x:", dev->dev_addr[i]);
        printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

        pci_set_drvdata(pdev, dev);

        {
                u16 mii_cmd;
                int mii_status = mdio_read(dev, phy_id, 1);
                mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
                mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
                if (mii_status != 0xffff && mii_status != 0x0000) {
                        rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
                        printk(KERN_INFO "%s: MII PHY found at address "
                               "%d, status 0x%4.4x advertising %4.4x "
                               "Link %4.4x.\n", dev->name, phy_id,
                               mii_status, rp->mii_if.advertising,
                               mdio_read(dev, phy_id, 5));

                        /* set IFF_RUNNING */
                        if (mii_status & BMSR_LSTATUS)
                                netif_carrier_on(dev);
                        else
                                netif_carrier_off(dev);
                }
        }
        rp->mii_if.phy_id = phy_id;
        if (debug > 1 && avoid_D3)
                printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
                       dev->name);

        return 0;

err_out_unmap:
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev(dev);
err_out:
        return rc;
}

static int alloc_ring(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void *ring;
        dma_addr_t ring_dma;

        ring = pci_alloc_consistent(rp->pdev,
                                    RX_RING_SIZE * sizeof(struct rx_desc) +
                                    TX_RING_SIZE * sizeof(struct tx_desc),
                                    &ring_dma);
        if (!ring) {
                printk(KERN_ERR "Could not allocate DMA memory.\n");
                return -ENOMEM;
        }
        if (rp->quirks & rqRhineI) {
                rp->tx_bufs = pci_alloc_consistent(rp->pdev,
                                                   PKT_BUF_SZ * TX_RING_SIZE,
                                                   &rp->tx_bufs_dma);
                if (rp->tx_bufs == NULL) {
                        pci_free_consistent(rp->pdev,
                                            RX_RING_SIZE * sizeof(struct rx_desc) +
                                            TX_RING_SIZE * sizeof(struct tx_desc),
                                            ring, ring_dma);
                        return -ENOMEM;
                }
        }

        rp->rx_ring = ring;
        rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
        rp->rx_ring_dma = ring_dma;
        rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

        return 0;
}

static void free_ring(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        pci_free_consistent(rp->pdev,
                            RX_RING_SIZE * sizeof(struct rx_desc) +
                            TX_RING_SIZE * sizeof(struct tx_desc),
                            rp->rx_ring, rp->rx_ring_dma);
        rp->tx_ring = NULL;

        if (rp->tx_bufs)
                pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
                                    rp->tx_bufs, rp->tx_bufs_dma);

        rp->tx_bufs = NULL;
}

static void alloc_rbufs(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        dma_addr_t next;
        int i;

        rp->dirty_rx = rp->cur_rx = 0;

        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
        rp->rx_head_desc = &rp->rx_ring[0];
        next = rp->rx_ring_dma;

        /* Init the ring entries */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
                next += sizeof(struct rx_desc);
                rp->rx_ring[i].next_desc = cpu_to_le32(next);
                rp->rx_skbuff[i] = NULL;
        }
        /* Mark the last entry as wrapping the ring. */
        rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

        /* Fill in the Rx buffers. Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
                rp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;         /* Mark as being used by this device. */

                rp->rx_skbuff_dma[i] =
                        pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
                                       PCI_DMA_FROMDEVICE);

                rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
                rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
        }
        rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        int i;

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->rx_skbuff[i]) {
                        pci_unmap_single(rp->pdev,
                                         rp->rx_skbuff_dma[i],
                                         rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(rp->rx_skbuff[i]);
                }
                rp->rx_skbuff[i] = NULL;
        }
}

static void alloc_tbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        dma_addr_t next;
        int i;

        rp->dirty_tx = rp->cur_tx = 0;
        next = rp->tx_ring_dma;
        for (i = 0; i < TX_RING_SIZE; i++) {
                rp->tx_skbuff[i] = NULL;
                rp->tx_ring[i].tx_status = 0;
                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
                next += sizeof(struct tx_desc);
                rp->tx_ring[i].next_desc = cpu_to_le32(next);
                if (rp->quirks & rqRhineI)
                        rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
        }
        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        int i;

        for (i = 0; i < TX_RING_SIZE; i++) {
                rp->tx_ring[i].tx_status = 0;
                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->tx_skbuff[i]) {
                        if (rp->tx_skbuff_dma[i]) {
                                pci_unmap_single(rp->pdev,
                                                 rp->tx_skbuff_dma[i],
                                                 rp->tx_skbuff[i]->len,
                                                 PCI_DMA_TODEVICE);
                        }
                        dev_kfree_skb(rp->tx_skbuff[i]);
                }
                rp->tx_skbuff[i] = NULL;
                rp->tx_buf[i] = NULL;
        }
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        mii_check_media(&rp->mii_if, debug, init_media);

        if (rp->mii_if.full_duplex)
                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
                         ioaddr + ChipCmd1);
        else
                iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
                         ioaddr + ChipCmd1);
        if (debug > 1)
                printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
                       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
        if (mii->force_media) {
                /* autoneg is off: Link is always assumed to be up */
                if (!netif_carrier_ok(mii->dev))
                        netif_carrier_on(mii->dev);
        }
        else    /* Let the MII library update carrier status */
                rhine_check_media(mii->dev, 0);
        if (debug > 1)
                printk(KERN_INFO "%s: force_media %d, carrier %d\n",
                       mii->dev->name, mii->force_media,
                       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int i;

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

        /* Initialize other registers. */
        iowrite16(0x0006, ioaddr + PCIBusConfig);       /* Tune configuration??? */
        /* Configure initial FIFO thresholds. */
        iowrite8(0x20, ioaddr + TxConfig);
        rp->tx_thresh = 0x20;
        rp->rx_thresh = 0x60;           /* Written in rhine_set_rx_mode(). */

        iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
        iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

        rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
        napi_enable(&rp->napi);
#endif

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
                  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
                  IntrTxDone | IntrTxError | IntrTxUnderrun |
                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
                  ioaddr + IntrEnable);

        iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
                  ioaddr + ChipCmd);
        rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
        iowrite8(0, ioaddr + MIICmd);
        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
        iowrite8(0x80, ioaddr + MIICmd);

        RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
        iowrite8(0, ioaddr + MIICmd);

        if (quirks & rqRhineI) {
                iowrite8(0x01, ioaddr + MIIRegAddr);    // MII_BMSR

                /* Can be called from ISR. Evil. */
                mdelay(1);

                /* 0x80 must be set immediately before turning it off */
                iowrite8(0x80, ioaddr + MIICmd);

                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

                /* Heh. Now clear 0x80 again. */
                iowrite8(0, ioaddr + MIICmd);
        }
        else
                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int result;

        rhine_disable_linkmon(ioaddr, rp->quirks);

        /* rhine_disable_linkmon already cleared MIICmd */
        iowrite8(phy_id, ioaddr + MIIPhyAddr);
        iowrite8(regnum, ioaddr + MIIRegAddr);
        iowrite8(0x40, ioaddr + MIICmd);                /* Trigger read */
        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
        result = ioread16(ioaddr + MIIData);

        rhine_enable_linkmon(ioaddr);
        return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        rhine_disable_linkmon(ioaddr, rp->quirks);

        /* rhine_disable_linkmon already cleared MIICmd */
        iowrite8(phy_id, ioaddr + MIIPhyAddr);
        iowrite8(regnum, ioaddr + MIIRegAddr);
        iowrite16(value, ioaddr + MIIData);
        iowrite8(0x20, ioaddr + MIICmd);                /* Trigger write */
        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

        rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int rc;

        rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
                         dev);
        if (rc)
                return rc;

        if (debug > 1)
                printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
                       dev->name, rp->pdev->irq);

        rc = alloc_ring(dev);
        if (rc) {
                free_irq(rp->pdev->irq, dev);
                return rc;
        }
        alloc_rbufs(dev);
        alloc_tbufs(dev);
        rhine_chip_reset(dev);
        init_registers(dev);
        if (debug > 2)
                printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
                       "MII status: %4.4x.\n",
                       dev->name, ioread16(ioaddr + ChipCmd),
                       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

        netif_start_queue(dev);

        return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
               "%4.4x, resetting...\n",
               dev->name, ioread16(ioaddr + IntrStatus),
               mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

        /* protect against concurrent rx interrupts */
        disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
        napi_disable(&rp->napi);
#endif

        spin_lock(&rp->lock);

        /* clear all descriptors */
        free_tbufs(dev);
        free_rbufs(dev);
        alloc_tbufs(dev);
        alloc_rbufs(dev);

        /* Reinitialize the hardware. */
        rhine_chip_reset(dev);
        init_registers(dev);

        spin_unlock(&rp->lock);
        enable_irq(rp->pdev->irq);

        dev->trans_start = jiffies;
        rp->stats.tx_errors++;
        netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        unsigned entry;

        /* Caution: the write order is important here, set the field
           with the "ownership" bits last. */

        /* Calculate the next Tx descriptor entry. */
        entry = rp->cur_tx % TX_RING_SIZE;

        if (skb_padto(skb, ETH_ZLEN))
                return 0;

        rp->tx_skbuff[entry] = skb;

        if ((rp->quirks & rqRhineI) &&
            (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
             skb->ip_summed == CHECKSUM_PARTIAL)) {
                /* Must use alignment buffer. */
                if (skb->len > PKT_BUF_SZ) {
                        /* packet too long, drop it */
                        dev_kfree_skb(skb);
                        rp->tx_skbuff[entry] = NULL;
                        rp->stats.tx_dropped++;
                        return 0;
                }

                /* Padding is not copied and so must be redone. */
                skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
                if (skb->len < ETH_ZLEN)
                        memset(rp->tx_buf[entry] + skb->len, 0,
                               ETH_ZLEN - skb->len);
                rp->tx_skbuff_dma[entry] = 0;
                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
                                                      (rp->tx_buf[entry] -
                                                       rp->tx_bufs));
        } else {
                rp->tx_skbuff_dma[entry] =
                        pci_map_single(rp->pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
        }

        rp->tx_ring[entry].desc_length =
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

        /* lock eth irq */
        spin_lock_irq(&rp->lock);
        wmb();
        rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
        wmb();

        rp->cur_tx++;

        /* Non-x86 Todo: explicitly flush cache lines here. */

        /* Wake the potentially-idle transmit channel */
        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
                 ioaddr + ChipCmd1);
        IOSYNC;

        if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

        spin_unlock_irq(&rp->lock);

        if (debug > 4) {
                printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
                       dev->name, rp->cur_tx-1, entry);
        }
        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u32 intr_status;
        int boguscnt = max_interrupt_work;
        int handled = 0;

        while ((intr_status = get_intr_status(dev))) {
                handled = 1;

                /* Acknowledge all of the current interrupt sources ASAP. */
                if (intr_status & IntrTxDescRace)
                        iowrite8(0x08, ioaddr + IntrStatus2);
                iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
                IOSYNC;

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
                               dev->name, intr_status);

                if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
                                   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
                        iowrite16(IntrTxAborted |
                                  IntrTxDone | IntrTxError | IntrTxUnderrun |
                                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
                                  ioaddr + IntrEnable);

                        netif_rx_schedule(dev, &rp->napi);
#else
                        rhine_rx(dev, RX_RING_SIZE);
#endif
                }

                if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
                        if (intr_status & IntrTxErrSummary) {
                                /* Avoid scavenging before Tx engine turned off */
                                RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
                                if (debug > 2 &&
                                    ioread8(ioaddr+ChipCmd) & CmdTxOn)
                                        printk(KERN_WARNING "%s: "
                                               "rhine_interrupt() Tx engine "
                                               "still on.\n", dev->name);
                        }
                        rhine_tx(dev);
                }

                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & (IntrPCIErr | IntrLinkChange |
                                   IntrStatsMax | IntrTxError | IntrTxAborted |
                                   IntrTxUnderrun | IntrTxDescRace))
                        rhine_error(dev, intr_status);

                if (--boguscnt < 0) {
                        printk(KERN_WARNING "%s: Too much work at interrupt, "
                               "status=%#8.8x.\n",
                               dev->name, intr_status);
                        break;
                }
        }

        if (debug > 3)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
                       dev->name, ioread16(ioaddr + IntrStatus));
        return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

        spin_lock(&rp->lock);

        /* find and clean up dirty tx descriptors */
        while (rp->dirty_tx != rp->cur_tx) {
                txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
                if (debug > 6)
                        printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
                               entry, txstatus);
                if (txstatus & DescOwn)
                        break;
                if (txstatus & 0x8000) {
                        if (debug > 1)
                                printk(KERN_DEBUG "%s: Transmit error, "
                                       "Tx status %8.8x.\n",
                                       dev->name, txstatus);
                        rp->stats.tx_errors++;
                        if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
                        if (txstatus & 0x0200) rp->stats.tx_window_errors++;
                        if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
                        if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
                        if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
                            (txstatus & 0x0800) || (txstatus & 0x1000)) {
                                rp->stats.tx_fifo_errors++;
                                rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
                                break; /* Keep the skb - we try again */
                        }
                        /* Transmitter restarted in 'abnormal' handler. */
                } else {
                        if (rp->quirks & rqRhineI)
                                rp->stats.collisions += (txstatus >> 3) & 0x0F;
                        else
                                rp->stats.collisions += txstatus & 0x0F;
                        if (debug > 6)
                                printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
                                       (txstatus >> 3) & 0xF,
                                       txstatus & 0xF);
                        rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
                        rp->stats.tx_packets++;
                }
                /* Free the original skb. */
                if (rp->tx_skbuff_dma[entry]) {
                        pci_unmap_single(rp->pdev,
                                         rp->tx_skbuff_dma[entry],
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
                dev_kfree_skb_irq(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
        if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
                netif_wake_queue(dev);

        spin_unlock(&rp->lock);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
        struct rhine_private *rp = netdev_priv(dev);
        int count;
        int entry = rp->cur_rx % RX_RING_SIZE;

        if (debug > 4) {
                printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
                       dev->name, entry,
                       le32_to_cpu(rp->rx_head_desc->rx_status));
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        for (count = 0; count < limit; ++count) {
                struct rx_desc *desc = rp->rx_head_desc;
                u32 desc_status = le32_to_cpu(desc->rx_status);
                int data_size = desc_status >> 16;

                if (desc_status & DescOwn)
                        break;

                if (debug > 4)
                        printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
                               desc_status);

                if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
                        if ((desc_status & RxWholePkt) != RxWholePkt) {
                                printk(KERN_WARNING "%s: Oversized Ethernet "
                                       "frame spanned multiple buffers, entry "
                                       "%#x length %d status %8.8x!\n",
                                       dev->name, entry, data_size,
                                       desc_status);
                                printk(KERN_WARNING "%s: Oversized Ethernet "
                                       "frame %p vs %p.\n", dev->name,
                                       rp->rx_head_desc, &rp->rx_ring[entry]);
                                rp->stats.rx_length_errors++;
                        } else if (desc_status & RxErr) {
                                /* There was an error. */
                                if (debug > 2)
                                        printk(KERN_DEBUG "rhine_rx() Rx "
                                               "error was %8.8x.\n",
                                               desc_status);
                                rp->stats.rx_errors++;
                                if (desc_status & 0x0030) rp->stats.rx_length_errors++;
                                if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
                                if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
                                if (desc_status & 0x0002) {
                                        /* this can also be updated outside the interrupt handler */
                                        spin_lock(&rp->lock);
                                        rp->stats.rx_crc_errors++;
                                        spin_unlock(&rp->lock);
                                }
                        }
                } else {
                        struct sk_buff *skb;
                        /* Length should omit the CRC */
                        int pkt_len = data_size - 4;

                        /* Check if the packet is long enough to accept without
                           copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(rp->pdev,
                                                            rp->rx_skbuff_dma[entry],
                                                            rp->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);

                                skb_copy_to_linear_data(skb,
                                                        rp->rx_skbuff[entry]->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(rp->pdev,
                                                               rp->rx_skbuff_dma[entry],
                                                               rp->rx_buf_sz,
                                                               PCI_DMA_FROMDEVICE);
                        } else {
                                skb = rp->rx_skbuff[entry];
                                if (skb == NULL) {
                                        printk(KERN_ERR "%s: Inconsistent Rx "
                                               "descriptor chain.\n",
                                               dev->name);
                                        break;
                                }
                                rp->rx_skbuff[entry] = NULL;
                                skb_put(skb, pkt_len);
                                pci_unmap_single(rp->pdev,
                                                 rp->rx_skbuff_dma[entry],
                                                 rp->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
                        netif_receive_skb(skb);
#else
                        netif_rx(skb);
#endif
                        dev->last_rx = jiffies;
                        rp->stats.rx_bytes += pkt_len;
                        rp->stats.rx_packets++;
                }
                entry = (++rp->cur_rx) % RX_RING_SIZE;
                rp->rx_head_desc = &rp->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
                struct sk_buff *skb;
                entry = rp->dirty_rx % RX_RING_SIZE;
                if (rp->rx_skbuff[entry] == NULL) {
                        skb = dev_alloc_skb(rp->rx_buf_sz);
                        rp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        skb->dev = dev; /* Mark as being used by this device. */
                        rp->rx_skbuff_dma[entry] =
                                pci_map_single(rp->pdev, skb->data,
                                               rp->rx_buf_sz,
                                               PCI_DMA_FROMDEVICE);
                        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
                }
                rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
        }

        return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
        iowrite32(0, ioaddr + RxMissed);
        ioread16(ioaddr + RxCRCErrs);
        ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int entry = rp->dirty_tx % TX_RING_SIZE;
        u32 intr_status;

        /*
         * If new errors occurred, we need to sort them out before doing Tx.
         * In that case the ISR will be back here RSN anyway.
         */
        intr_status = get_intr_status(dev);

        if ((intr_status & IntrTxErrSummary) == 0) {

                /* We know better than the chip where it should continue. */
                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
                          ioaddr + TxRingPtr);

                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
                         ioaddr + ChipCmd);
                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
                         ioaddr + ChipCmd1);
                IOSYNC;
        }
        else {
                /* This should never happen */
                if (debug > 1)
                        printk(KERN_WARNING "%s: rhine_restart_tx() "
                               "Another error occurred %8.8x.\n",
                               dev->name, intr_status);
        }
}

static void rhine_error(struct net_device *dev, int intr_status)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        spin_lock(&rp->lock);

        if (intr_status & IntrLinkChange)
                rhine_check_media(dev, 0);
        if (intr_status & IntrStatsMax) {
                rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
                rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
                clear_tally_counters(ioaddr);
        }
        if (intr_status & IntrTxAborted) {
                if (debug > 1)
                        printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
                               dev->name, intr_status);
        }
        if (intr_status & IntrTxUnderrun) {
                if (rp->tx_thresh < 0xE0)
                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
                if (debug > 1)
                        printk(KERN_INFO "%s: Transmitter underrun, Tx "
                               "threshold now %2.2x.\n",
                               dev->name, rp->tx_thresh);
        }
        if (intr_status & IntrTxDescRace) {
                if (debug > 2)
                        printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
                               dev->name);
        }
        if ((intr_status & IntrTxError) &&
            (intr_status & (IntrTxAborted |
                            IntrTxUnderrun | IntrTxDescRace)) == 0) {
                if (rp->tx_thresh < 0xE0) {
                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
                }
                if (debug > 1)
                        printk(KERN_INFO "%s: Unspecified error. Tx "
                               "threshold now %2.2x.\n",
                               dev->name, rp->tx_thresh);
        }
        if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
                           IntrTxError))
                rhine_restart_tx(dev);

        if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
                            IntrTxError | IntrTxAborted | IntrNormalSummary |
                            IntrTxDescRace)) {
                if (debug > 1)
                        printk(KERN_ERR "%s: Something Wicked happened! "
                               "%8.8x.\n", dev->name, intr_status);
        }

        spin_unlock(&rp->lock);
}

1666static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1667{
1668 struct rhine_private *rp = netdev_priv(dev);
1669 void __iomem *ioaddr = rp->base;
1670 unsigned long flags;
1671
1672 spin_lock_irqsave(&rp->lock, flags);
1673 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1674 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1675 clear_tally_counters(ioaddr);
1676 spin_unlock_irqrestore(&rp->lock, flags);
1677
1678 return &rp->stats;
1679}
1680
1681static void rhine_set_rx_mode(struct net_device *dev)
1682{
1683 struct rhine_private *rp = netdev_priv(dev);
1684 void __iomem *ioaddr = rp->base;
1685 u32 mc_filter[2]; /* Multicast hash filter */
1686 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1687
1688 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1da177e4
LT
1689 rx_mode = 0x1C;
1690 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1691 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1692 } else if ((dev->mc_count > multicast_filter_limit)
1693 || (dev->flags & IFF_ALLMULTI)) {
1694 /* Too many to match, or accept all multicasts. */
1695 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1696 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1697 rx_mode = 0x0C;
1698 } else {
1699 struct dev_mc_list *mclist;
1700 int i;
1701 memset(mc_filter, 0, sizeof(mc_filter));
1702 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1703 i++, mclist = mclist->next) {
1704 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1705
1706 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1707 }
1708 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1709 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1710 rx_mode = 0x0C;
1711 }
1712 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1713}
1714
1715static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1716{
1717 struct rhine_private *rp = netdev_priv(dev);
1718
1719 strcpy(info->driver, DRV_NAME);
1720 strcpy(info->version, DRV_VERSION);
1721 strcpy(info->bus_info, pci_name(rp->pdev));
1722}
1723
1724static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1725{
1726 struct rhine_private *rp = netdev_priv(dev);
1727 int rc;
1728
1729 spin_lock_irq(&rp->lock);
1730 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1731 spin_unlock_irq(&rp->lock);
1732
1733 return rc;
1734}
1735
1736static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1737{
1738 struct rhine_private *rp = netdev_priv(dev);
1739 int rc;
1740
1741 spin_lock_irq(&rp->lock);
1742 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1743 spin_unlock_irq(&rp->lock);
00b428c2 1744 rhine_set_carrier(&rp->mii_if);
1da177e4
LT
1745
1746 return rc;
1747}
1748
1749static int netdev_nway_reset(struct net_device *dev)
1750{
1751 struct rhine_private *rp = netdev_priv(dev);
1752
1753 return mii_nway_restart(&rp->mii_if);
1754}
1755
1756static u32 netdev_get_link(struct net_device *dev)
1757{
1758 struct rhine_private *rp = netdev_priv(dev);
1759
1760 return mii_link_ok(&rp->mii_if);
1761}
1762
1763static u32 netdev_get_msglevel(struct net_device *dev)
1764{
1765 return debug;
1766}
1767
1768static void netdev_set_msglevel(struct net_device *dev, u32 value)
1769{
1770 debug = value;
1771}
1772
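/* Wake-on-LAN is only exposed on chips with the rqWOL quirk; the
   unicast/multicast/broadcast wake options are marked untested. */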
1773static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1774{
1775 struct rhine_private *rp = netdev_priv(dev);
1776
1777 if (!(rp->quirks & rqWOL))
1778 return;
1779
1780 spin_lock_irq(&rp->lock);
1781 wol->supported = WAKE_PHY | WAKE_MAGIC |
1782 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1783 wol->wolopts = rp->wolopts;
1784 spin_unlock_irq(&rp->lock);
1785}
1786
1787static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1788{
1789 struct rhine_private *rp = netdev_priv(dev);
1790 u32 support = WAKE_PHY | WAKE_MAGIC |
1791 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1792
1793 if (!(rp->quirks & rqWOL))
1794 return -EINVAL;
1795
1796 if (wol->wolopts & ~support)
1797 return -EINVAL;
1798
1799 spin_lock_irq(&rp->lock);
1800 rp->wolopts = wol->wolopts;
1801 spin_unlock_irq(&rp->lock);
1802
1803 return 0;
1804}
1805
1806 static const struct ethtool_ops netdev_ethtool_ops = {
1807 .get_drvinfo = netdev_get_drvinfo,
1808 .get_settings = netdev_get_settings,
1809 .set_settings = netdev_set_settings,
1810 .nway_reset = netdev_nway_reset,
1811 .get_link = netdev_get_link,
1812 .get_msglevel = netdev_get_msglevel,
1813 .set_msglevel = netdev_set_msglevel,
1814 .get_wol = rhine_get_wol,
1815 .set_wol = rhine_set_wol,
1816};
1817
1818static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1819{
1820 struct rhine_private *rp = netdev_priv(dev);
1821 int rc;
1822
1823 if (!netif_running(dev))
1824 return -EINVAL;
1825
1826 spin_lock_irq(&rp->lock);
1827 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1828 spin_unlock_irq(&rp->lock);
1829	rhine_set_carrier(&rp->mii_if);
1830
1831 return rc;
1832}
1833
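/*
 * Orderly teardown: quiesce the transmit queue (and NAPI, if
 * enabled), drop the chip into loopback, mask its interrupts and
 * stop DMA before releasing the IRQ and the descriptor rings.
 */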
1834static int rhine_close(struct net_device *dev)
1835{
1836 struct rhine_private *rp = netdev_priv(dev);
1837 void __iomem *ioaddr = rp->base;
1838
1839 spin_lock_irq(&rp->lock);
1840
1841 netif_stop_queue(dev);
1842#ifdef CONFIG_VIA_RHINE_NAPI
1843 napi_disable(&rp->napi);
1844#endif
1845
1846 if (debug > 1)
1847 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1848 "status was %4.4x.\n",
1849 dev->name, ioread16(ioaddr + ChipCmd));
1850
1851 /* Switch to loopback mode to avoid hardware races. */
1852 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1853
1854 /* Disable interrupts by clearing the interrupt mask. */
1855 iowrite16(0x0000, ioaddr + IntrEnable);
1856
1857 /* Stop the chip's Tx and Rx processes. */
1858 iowrite16(CmdStop, ioaddr + ChipCmd);
1859
1860 spin_unlock_irq(&rp->lock);
1861
1862 free_irq(rp->pdev->irq, dev);
1863 free_rbufs(dev);
1864 free_tbufs(dev);
1865 free_ring(dev);
1866
1867 return 0;
1868}
1869
1870
1871static void __devexit rhine_remove_one(struct pci_dev *pdev)
1872{
1873 struct net_device *dev = pci_get_drvdata(pdev);
1874 struct rhine_private *rp = netdev_priv(dev);
1875
1876 unregister_netdev(dev);
1877
1878 pci_iounmap(pdev, rp->base);
1879 pci_release_regions(pdev);
1880
1881 free_netdev(dev);
1882 pci_disable_device(pdev);
1883 pci_set_drvdata(pdev, NULL);
1884}
1885
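/*
 * Arm wake-on-LAN as configured and put the chip into D3 at system
 * shutdown. avoid_D3 keeps it in a higher power state for BIOSes
 * that otherwise cannot bring it back for PXE boot.
 */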
1886 static void rhine_shutdown(struct pci_dev *pdev)
1887 {
1888 struct net_device *dev = pci_get_drvdata(pdev);
1889 struct rhine_private *rp = netdev_priv(dev);
1890 void __iomem *ioaddr = rp->base;
1891
1892 if (!(rp->quirks & rqWOL))
1893 return; /* Nothing to do for non-WOL adapters */
1894
1895 rhine_power_init(dev);
1896
1897 /* Make sure we use pattern 0, 1 and not 4, 5 */
1898 if (rp->quirks & rq6patterns)
1899 iowrite8(0x04, ioaddr + 0xA7);
1900
1901 if (rp->wolopts & WAKE_MAGIC) {
1902 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1903 /*
1904 * Turn EEPROM-controlled wake-up back on -- some hardware may
1905 * not cooperate otherwise.
1906 */
1907 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1908 }
1909
1910 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1911 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1912
1913 if (rp->wolopts & WAKE_PHY)
1914 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1915
1916 if (rp->wolopts & WAKE_UCAST)
1917 iowrite8(WOLucast, ioaddr + WOLcrSet);
1918
1919 if (rp->wolopts) {
1920 /* Enable legacy WOL (for old motherboards) */
1921 iowrite8(0x01, ioaddr + PwcfgSet);
1922 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1923 }
1924
1925 /* Hit power state D3 (sleep) */
1926 if (!avoid_D3)
1927 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1928
1929 /* TODO: Check use of pci_enable_wake() */
1930
1931}
1932
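/*
 * Power management: suspend reuses the shutdown path to arm WOL;
 * resume rebuilds the descriptor rings from scratch rather than
 * trusting whatever DMA state survived the power-down.
 */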
1933#ifdef CONFIG_PM
1934static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1935{
1936 struct net_device *dev = pci_get_drvdata(pdev);
1937 struct rhine_private *rp = netdev_priv(dev);
1938 unsigned long flags;
1939
1940 if (!netif_running(dev))
1941 return 0;
1942
1943#ifdef CONFIG_VIA_RHINE_NAPI
1944 napi_disable(&rp->napi);
1945#endif
1946 netif_device_detach(dev);
1947 pci_save_state(pdev);
1948
1949 spin_lock_irqsave(&rp->lock, flags);
1950	rhine_shutdown(pdev);
1951 spin_unlock_irqrestore(&rp->lock, flags);
1952
1953 free_irq(dev->irq, dev);
1954 return 0;
1955}
1956
1957static int rhine_resume(struct pci_dev *pdev)
1958{
1959 struct net_device *dev = pci_get_drvdata(pdev);
1960 struct rhine_private *rp = netdev_priv(dev);
1961 unsigned long flags;
1962 int ret;
1963
1964 if (!netif_running(dev))
1965 return 0;
1966
1967	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1968 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1969
1970 ret = pci_set_power_state(pdev, PCI_D0);
1971 if (debug > 1)
1972 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1973 dev->name, ret ? "failed" : "succeeded", ret);
1974
1975 pci_restore_state(pdev);
1976
1977 spin_lock_irqsave(&rp->lock, flags);
1978#ifdef USE_MMIO
1979 enable_mmio(rp->pioaddr, rp->quirks);
1980#endif
1981 rhine_power_init(dev);
1982 free_tbufs(dev);
1983 free_rbufs(dev);
1984 alloc_tbufs(dev);
1985 alloc_rbufs(dev);
1986 init_registers(dev);
1987 spin_unlock_irqrestore(&rp->lock, flags);
1988
1989 netif_device_attach(dev);
1990
1991 return 0;
1992}
1993#endif /* CONFIG_PM */
1994
1995static struct pci_driver rhine_driver = {
1996 .name = DRV_NAME,
1997 .id_table = rhine_pci_tbl,
1998 .probe = rhine_init_one,
1999 .remove = __devexit_p(rhine_remove_one),
2000#ifdef CONFIG_PM
2001 .suspend = rhine_suspend,
2002 .resume = rhine_resume,
2003#endif /* CONFIG_PM */
2004	.shutdown =	rhine_shutdown,
2005};
2006
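/* Boards whose BIOS cannot wake the chip from D3, keyed on the
   DMI BIOS vendor and version strings. */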
2007static struct dmi_system_id __initdata rhine_dmi_table[] = {
2008 {
2009 .ident = "EPIA-M",
2010 .matches = {
2011 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2012 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2013 },
2014 },
2015 {
2016 .ident = "KV7",
2017 .matches = {
2018 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2019 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2020 },
2021 },
2022 { NULL }
2023};
2024
2025static int __init rhine_init(void)
2026{
2027 /* When built as a module, the version banner is printed whether or not
2028    any devices are found during probe. */
2028#ifdef MODULE
2029 printk(version);
2030#endif
2031 if (dmi_check_system(rhine_dmi_table)) {
2032 /* these BIOSes fail at PXE boot if chip is in D3 */
2033 avoid_D3 = 1;
2034 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2035 "enabled.\n",
2036 DRV_NAME);
2037 }
2038 else if (avoid_D3)
2039 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2040
2041	return pci_register_driver(&rhine_driver);
2042}
2043
2044
2045static void __exit rhine_cleanup(void)
2046{
2047 pci_unregister_driver(&rhine_driver);
2048}
2049
2050
2051module_init(rhine_init);
2052module_exit(rhine_cleanup);
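
/*
 * Typical module usage (illustrative values only; both parameters
 * are declared at the top of this file):
 *
 *   modprobe via-rhine debug=2 avoid_D3=1
 */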