drivers/net/ethernet/dec/tulip/winbond-840.c
1/* winbond-840.c: A Linux PCI network adapter device driver. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
19
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
23 as an improvement.
24
25 Changelog:
26 * ported to 2.4
27 ???
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
 30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
35 * further cleanups
36 power management.
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
 41
42 TODO:
43 * enable pci_power_off
44 * Wake-On-LAN
45*/
 46
47#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48
 49#define DRV_NAME "winbond-840"
 50#define DRV_VERSION "1.01-e"
 51#define DRV_RELDATE "Sep-11-2006"
52
53
54/* Automatically extracted configuration info:
55probe-func: winbond840_probe
56config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
57
58c-help-name: Winbond W89c840 PCI Ethernet support
59c-help-symbol: CONFIG_WINBOND_840
60c-help: This driver is for the Winbond W89c840 chip. It also works with
61c-help: the TX9882 chip on the Compex RL100-ATX board.
 62c-help: More specific information and updates are available from
63c-help: http://www.scyld.com/network/drivers.html
64*/
65
66/* The user-configurable values.
67 These may be modified when a driver module is loaded.*/
68
69static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
70static int max_interrupt_work = 20;
71/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
72 The '840 uses a 64 element hash table based on the Ethernet CRC. */
73static int multicast_filter_limit = 32;
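/* Illustrative sketch only (it mirrors the logic in __set_rx_mode() below):
   each multicast address selects one of the 64 hash-filter bits, and the two
   resulting 32-bit words are written to the MulticastFilter0/1 registers.

	filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
	filbit &= 0x3f;
	mc_filter[filbit >> 5] |= 1 << (filbit & 31);
*/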
74
75/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
76 Setting to > 1518 effectively disables this feature. */
77static int rx_copybreak;
78
79/* Used to pass the media type, etc.
80 Both 'options[]' and 'full_duplex[]' should exist for driver
81 interoperability.
82 The media type is usually passed in 'options[]'.
83*/
84#define MAX_UNITS 8 /* More are supported, limit only on options */
85static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87
88/* Operational parameters that are set at compile time. */
89
90/* Keep the ring sizes a power of two for compile efficiency.
91 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
92 Making the Tx ring too large decreases the effectiveness of channel
93 bonding and packet priority.
94 There are no ill effects from too-large receive rings. */
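/* For example, because TX_RING_SIZE is a power of two, an index computation
   such as
	entry = np->cur_tx % TX_RING_SIZE;
   (as used in start_tx() below) compiles down to a bitwise AND with
   (TX_RING_SIZE - 1). */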
95#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
96#define TX_QUEUE_LEN_RESTART 5
97
98#define TX_BUFLIMIT (1024-128)
99
100/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
102 full-size packet.
103 */
104#define TX_FIFO_SIZE (2048)
105#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
106
107
108/* Operational parameters that usually are not changed. */
109/* Time in jiffies before concluding the transmitter is hung. */
110#define TX_TIMEOUT (2*HZ)
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h>
114#include <linux/kernel.h>
115#include <linux/string.h>
116#include <linux/timer.h>
117#include <linux/errno.h>
118#include <linux/ioport.h>
119#include <linux/interrupt.h>
120#include <linux/pci.h>
 121#include <linux/dma-mapping.h>
122#include <linux/netdevice.h>
123#include <linux/etherdevice.h>
124#include <linux/skbuff.h>
125#include <linux/init.h>
126#include <linux/delay.h>
127#include <linux/ethtool.h>
128#include <linux/mii.h>
129#include <linux/rtnetlink.h>
130#include <linux/crc32.h>
131#include <linux/bitops.h>
132#include <asm/uaccess.h>
133#include <asm/processor.h> /* Processor type for cache alignment. */
134#include <asm/io.h>
135#include <asm/irq.h>
136
137#include "tulip.h"
138
139#undef PKT_BUF_SZ /* tulip.h also defines this */
140#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
141
 142/* These identify the driver base version and may not be removed. */
 143static const char version[] __initconst =
 144	"v" DRV_VERSION " (2.4 port) "
 145	DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
 146	"  http://www.scyld.com/network/drivers.html\n";
147
148MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
150MODULE_LICENSE("GPL");
151MODULE_VERSION(DRV_VERSION);
152
153module_param(max_interrupt_work, int, 0);
154module_param(debug, int, 0);
155module_param(rx_copybreak, int, 0);
156module_param(multicast_filter_limit, int, 0);
157module_param_array(options, int, NULL, 0);
158module_param_array(full_duplex, int, NULL, 0);
159MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
160MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
161MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
162MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
163MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
164MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
165
166/*
167 Theory of Operation
168
169I. Board Compatibility
170
171This driver is for the Winbond w89c840 chip.
172
173II. Board-specific settings
174
175None.
176
177III. Driver operation
178
179This chip is very similar to the Digital 21*4* "Tulip" family. The first
180twelve registers and the descriptor format are nearly identical. Read a
181Tulip manual for operational details.
182
183A significant difference is that the multicast filter and station address are
184stored in registers rather than loaded through a pseudo-transmit packet.
185
186Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
187full-sized packet we must use both data buffers in a descriptor. Thus the
188driver uses ring mode where descriptors are implicitly sequential in memory,
189rather than using the second descriptor address as a chain pointer to
190subsequent descriptors.
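
As a rough sketch (mirroring start_tx() below, with "desc" standing in for
the np->tx_ring[entry] being filled), a frame larger than the 1KB buffer
limit is carried by both buffers of a single descriptor:

	if (skb->len < TX_BUFLIMIT) {
		desc->length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;
		desc->buffer2 = desc->buffer1 + TX_BUFLIMIT;
		desc->length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}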
191
192IV. Notes
193
194If you are going to almost clone a Tulip, why not go all the way and avoid
195the need for a new driver?
196
197IVb. References
198
199http://www.scyld.com/expert/100mbps.html
200http://www.scyld.com/expert/NWay.html
201http://www.winbond.com.tw/
202
203IVc. Errata
204
205A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
206correctly detect a full FIFO, and queuing more than 2048 bytes may result in
207silent data corruption.
208
209Test with 'ping -s 10000' on a fast computer.
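
The driver's workaround (see TX_BUG_FIFO_LIMIT and start_tx() below) is,
roughly, to stop the queue whenever the bytes already queued no longer
leave room for a full-sized frame:

	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT))
		netif_stop_queue(dev);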
210
211*/
212
 213
214
215/*
216 PCI probe table.
217*/
 218enum chip_capability_flags {
219 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
220};
221
 222static const struct pci_device_id w840_pci_tbl[] = {
223 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
224 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
225 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
 226 { }
227};
228MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
229
230enum {
231 netdev_res_size = 128, /* size of PCI BAR resource */
232};
233
234struct pci_id_info {
235 const char *name;
 236 int drv_flags; /* Driver use, intended as capability flags. */
 237};
 238
 239static const struct pci_id_info pci_id_tbl[] = {
240 { /* Sometime a Level-One switch card. */
241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
243 { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
244 { } /* terminate list. */
245};
246
247/* This driver was written to use PCI memory space, however some x86 systems
248 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
249*/
250
251/* Offsets to the Command and Status Registers, "CSRs".
252 While similar to the Tulip, these registers are longword aligned.
253 Note: It's not useful to define symbolic names for every register bit in
254 the device. The name can only partially document the semantics and make
255 the driver longer and more difficult to read.
256*/
257enum w840_offsets {
258 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
259 RxRingPtr=0x0C, TxRingPtr=0x10,
260 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
261 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
262 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
263 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
264 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
265};
266
267/* Bits in the NetworkConfig register. */
268enum rx_mode_bits {
269 AcceptErr=0x80,
270 RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
271 RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
272};
273
274enum mii_reg_bits {
275 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
276 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
277};
278
279/* The Tulip Rx and Tx buffer descriptors. */
280struct w840_rx_desc {
281 s32 status;
282 s32 length;
283 u32 buffer1;
284 u32 buffer2;
285};
286
287struct w840_tx_desc {
288 s32 status;
289 s32 length;
290 u32 buffer1, buffer2;
291};
292
293#define MII_CNT 1 /* winbond only supports one MII */
294struct netdev_private {
295 struct w840_rx_desc *rx_ring;
296 dma_addr_t rx_addr[RX_RING_SIZE];
297 struct w840_tx_desc *tx_ring;
298 dma_addr_t tx_addr[TX_RING_SIZE];
299 dma_addr_t ring_dma_addr;
300 /* The addresses of receive-in-place skbuffs. */
301 struct sk_buff* rx_skbuff[RX_RING_SIZE];
302 /* The saved address of a sent-in-place packet/buffer, for later free(). */
303 struct sk_buff* tx_skbuff[TX_RING_SIZE];
304 struct net_device_stats stats;
305 struct timer_list timer; /* Media monitoring timer. */
306 /* Frequently used values: keep some adjacent for cache effect. */
307 spinlock_t lock;
308 int chip_id, drv_flags;
309 struct pci_dev *pci_dev;
310 int csr6;
311 struct w840_rx_desc *rx_head_desc;
312 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
313 unsigned int rx_buf_sz; /* Based on MTU+slack. */
314 unsigned int cur_tx, dirty_tx;
315 unsigned int tx_q_bytes;
316 unsigned int tx_full; /* The Tx queue is full. */
317 /* MII transceiver section. */
318 int mii_cnt; /* MII device addresses. */
319 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
320 u32 mii;
321 struct mii_if_info mii_if;
322 void __iomem *base_addr;
323};
324
325static int eeprom_read(void __iomem *ioaddr, int location);
326static int mdio_read(struct net_device *dev, int phy_id, int location);
327static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
328static int netdev_open(struct net_device *dev);
329static int update_link(struct net_device *dev);
330static void netdev_timer(unsigned long data);
331static void init_rxtx_rings(struct net_device *dev);
332static void free_rxtx_rings(struct netdev_private *np);
333static void init_registers(struct net_device *dev);
334static void tx_timeout(struct net_device *dev);
335static int alloc_ringdesc(struct net_device *dev);
336static void free_ringdesc(struct netdev_private *np);
 337static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 338static irqreturn_t intr_handler(int irq, void *dev_instance);
339static void netdev_error(struct net_device *dev, int intr_status);
340static int netdev_rx(struct net_device *dev);
341static u32 __set_rx_mode(struct net_device *dev);
342static void set_rx_mode(struct net_device *dev);
343static struct net_device_stats *get_stats(struct net_device *dev);
344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static const struct ethtool_ops netdev_ethtool_ops;
346static int netdev_close(struct net_device *dev);
347
348static const struct net_device_ops netdev_ops = {
349 .ndo_open = netdev_open,
350 .ndo_stop = netdev_close,
351 .ndo_start_xmit = start_tx,
352 .ndo_get_stats = get_stats,
 353 .ndo_set_rx_mode = set_rx_mode,
354 .ndo_do_ioctl = netdev_ioctl,
355 .ndo_tx_timeout = tx_timeout,
356 .ndo_change_mtu = eth_change_mtu,
357 .ndo_set_mac_address = eth_mac_addr,
358 .ndo_validate_addr = eth_validate_addr,
359};
 360
 361static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
362{
363 struct net_device *dev;
364 struct netdev_private *np;
365 static int find_cnt;
366 int chip_idx = ent->driver_data;
367 int irq;
368 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
369 void __iomem *ioaddr;
370
371 i = pci_enable_device(pdev);
372 if (i) return i;
373
374 pci_set_master(pdev);
375
376 irq = pdev->irq;
377
 378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
379 pr_warn("Device %s disabled due to DMA limitations\n",
380 pci_name(pdev));
381 return -EIO;
382 }
383 dev = alloc_etherdev(sizeof(*np));
384 if (!dev)
385 return -ENOMEM;
386 SET_NETDEV_DEV(dev, &pdev->dev);
387
388 if (pci_request_regions(pdev, DRV_NAME))
389 goto err_out_netdev;
390
391 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
392 if (!ioaddr)
393 goto err_out_free_res;
394
395 for (i = 0; i < 3; i++)
 396 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
397
398 /* Reset the chip to erase previous misconfiguration.
399 No hold time required! */
400 iowrite32(0x00000001, ioaddr + PCIBusCfg);
401
402 np = netdev_priv(dev);
403 np->pci_dev = pdev;
404 np->chip_id = chip_idx;
405 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
406 spin_lock_init(&np->lock);
407 np->mii_if.dev = dev;
408 np->mii_if.mdio_read = mdio_read;
409 np->mii_if.mdio_write = mdio_write;
410 np->base_addr = ioaddr;
 411
412 pci_set_drvdata(pdev, dev);
413
414 if (dev->mem_start)
415 option = dev->mem_start;
416
417 /* The lower four bits are the media type. */
418 if (option > 0) {
419 if (option & 0x200)
420 np->mii_if.full_duplex = 1;
421 if (option & 15)
422 dev_info(&dev->dev,
423 "ignoring user supplied media type %d",
424 option & 15);
425 }
426 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
427 np->mii_if.full_duplex = 1;
428
429 if (np->mii_if.full_duplex)
430 np->mii_if.force_media = 1;
431
432 /* The chip-specific entries in the device structure. */
 433 dev->netdev_ops = &netdev_ops;
 434 dev->ethtool_ops = &netdev_ethtool_ops;
435 dev->watchdog_timeo = TX_TIMEOUT;
436
437 i = register_netdev(dev);
438 if (i)
439 goto err_out_cleardev;
440
441 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
442 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
443
444 if (np->drv_flags & CanHaveMII) {
445 int phy, phy_idx = 0;
446 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
447 int mii_status = mdio_read(dev, phy, MII_BMSR);
448 if (mii_status != 0xffff && mii_status != 0x0000) {
449 np->phys[phy_idx++] = phy;
450 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
451 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
452 mdio_read(dev, phy, MII_PHYSID2);
453 dev_info(&dev->dev,
454 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
455 np->mii, phy, mii_status,
456 np->mii_if.advertising);
457 }
458 }
459 np->mii_cnt = phy_idx;
460 np->mii_if.phy_id = np->phys[0];
461 if (phy_idx == 0) {
462 dev_warn(&dev->dev,
463 "MII PHY not found -- this device may not operate correctly\n");
464 }
465 }
466
467 find_cnt++;
468 return 0;
469
470err_out_cleardev:
471 pci_iounmap(pdev, ioaddr);
472err_out_free_res:
473 pci_release_regions(pdev);
474err_out_netdev:
475 free_netdev (dev);
476 return -ENODEV;
477}
478
 479
480/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
481 often serial bit streams generated by the host processor.
482 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
483
484/* Delay between EEPROM clock transitions.
 485 No extra delay is needed with 33MHz PCI, but future 66MHz access may need
486 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
487 made udelay() unreliable.
488 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
 489 deprecated.
490*/
491#define eeprom_delay(ee_addr) ioread32(ee_addr)
492
493enum EEPROM_Ctrl_Bits {
494 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
495 EE_ChipSelect=0x801, EE_DataIn=0x08,
496};
497
 498/* The EEPROM commands include the always-set leading bit. */
499enum EEPROM_Cmds {
500 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
501};
502
503static int eeprom_read(void __iomem *addr, int location)
504{
505 int i;
506 int retval = 0;
507 void __iomem *ee_addr = addr + EECtrl;
508 int read_cmd = location | EE_ReadCmd;
509 iowrite32(EE_ChipSelect, ee_addr);
510
511 /* Shift the read command bits out. */
512 for (i = 10; i >= 0; i--) {
513 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
514 iowrite32(dataval, ee_addr);
515 eeprom_delay(ee_addr);
516 iowrite32(dataval | EE_ShiftClk, ee_addr);
517 eeprom_delay(ee_addr);
518 }
519 iowrite32(EE_ChipSelect, ee_addr);
520 eeprom_delay(ee_addr);
521
522 for (i = 16; i > 0; i--) {
523 iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
524 eeprom_delay(ee_addr);
525 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
526 iowrite32(EE_ChipSelect, ee_addr);
527 eeprom_delay(ee_addr);
528 }
529
530 /* Terminate the EEPROM access. */
531 iowrite32(0, ee_addr);
532 return retval;
533}
534
535/* MII transceiver control section.
536 Read and write the MII registers using software-generated serial
537 MDIO protocol. See the MII specifications or DP83840A data sheet
538 for details.
539
 540 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
541 met by back-to-back 33Mhz PCI cycles. */
542#define mdio_delay(mdio_addr) ioread32(mdio_addr)
543
544/* Set iff a MII transceiver on any interface requires mdio preamble.
 545 This is only set with older transceivers, so the extra
546 code size of a per-interface flag is not worthwhile. */
547static char mii_preamble_required = 1;
548
549#define MDIO_WRITE0 (MDIO_EnbOutput)
550#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
551
552/* Generate the preamble required for initial synchronization and
553 a few older transceivers. */
554static void mdio_sync(void __iomem *mdio_addr)
555{
556 int bits = 32;
557
558 /* Establish sync by sending at least 32 logic ones. */
559 while (--bits >= 0) {
560 iowrite32(MDIO_WRITE1, mdio_addr);
561 mdio_delay(mdio_addr);
562 iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
563 mdio_delay(mdio_addr);
564 }
565}
566
567static int mdio_read(struct net_device *dev, int phy_id, int location)
568{
569 struct netdev_private *np = netdev_priv(dev);
570 void __iomem *mdio_addr = np->base_addr + MIICtrl;
571 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
572 int i, retval = 0;
573
574 if (mii_preamble_required)
575 mdio_sync(mdio_addr);
576
577 /* Shift the read command bits out. */
578 for (i = 15; i >= 0; i--) {
579 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
580
581 iowrite32(dataval, mdio_addr);
582 mdio_delay(mdio_addr);
583 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
584 mdio_delay(mdio_addr);
585 }
586 /* Read the two transition, 16 data, and wire-idle bits. */
587 for (i = 20; i > 0; i--) {
588 iowrite32(MDIO_EnbIn, mdio_addr);
589 mdio_delay(mdio_addr);
590 retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
591 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
592 mdio_delay(mdio_addr);
593 }
594 return (retval>>1) & 0xffff;
595}
596
597static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
598{
599 struct netdev_private *np = netdev_priv(dev);
600 void __iomem *mdio_addr = np->base_addr + MIICtrl;
601 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
602 int i;
603
604 if (location == 4 && phy_id == np->phys[0])
605 np->mii_if.advertising = value;
606
607 if (mii_preamble_required)
608 mdio_sync(mdio_addr);
609
610 /* Shift the command bits out. */
611 for (i = 31; i >= 0; i--) {
612 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
613
614 iowrite32(dataval, mdio_addr);
615 mdio_delay(mdio_addr);
616 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
617 mdio_delay(mdio_addr);
618 }
619 /* Clear out extra bits. */
620 for (i = 2; i > 0; i--) {
621 iowrite32(MDIO_EnbIn, mdio_addr);
622 mdio_delay(mdio_addr);
623 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
624 mdio_delay(mdio_addr);
625 }
626}
627
 628
629static int netdev_open(struct net_device *dev)
630{
631 struct netdev_private *np = netdev_priv(dev);
632 void __iomem *ioaddr = np->base_addr;
 633 const int irq = np->pci_dev->irq;
634 int i;
635
636 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
637
638 netif_device_detach(dev);
 639 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
640 if (i)
641 goto out_err;
642
643 if (debug > 1)
 644 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
645
646 if((i=alloc_ringdesc(dev)))
647 goto out_err;
648
649 spin_lock_irq(&np->lock);
650 netif_device_attach(dev);
651 init_registers(dev);
652 spin_unlock_irq(&np->lock);
653
654 netif_start_queue(dev);
655 if (debug > 2)
 656 netdev_dbg(dev, "Done netdev_open()\n");
657
658 /* Set the timer to check for link beat. */
659 init_timer(&np->timer);
660 np->timer.expires = jiffies + 1*HZ;
661 np->timer.data = (unsigned long)dev;
 662 np->timer.function = netdev_timer; /* timer handler */
663 add_timer(&np->timer);
664 return 0;
665out_err:
666 netif_device_attach(dev);
667 return i;
668}
669
670#define MII_DAVICOM_DM9101 0x0181b800
671
672static int update_link(struct net_device *dev)
673{
674 struct netdev_private *np = netdev_priv(dev);
675 int duplex, fasteth, result, mii_reg;
676
 677 /* BMSR */
678 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
679
680 if (mii_reg == 0xffff)
681 return np->csr6;
682 /* reread: the link status bit is sticky */
683 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
684 if (!(mii_reg & 0x4)) {
685 if (netif_carrier_ok(dev)) {
686 if (debug)
687 dev_info(&dev->dev,
688 "MII #%d reports no link. Disabling watchdog\n",
689 np->phys[0]);
690 netif_carrier_off(dev);
691 }
692 return np->csr6;
693 }
694 if (!netif_carrier_ok(dev)) {
695 if (debug)
696 dev_info(&dev->dev,
697 "MII #%d link is back. Enabling watchdog\n",
698 np->phys[0]);
699 netif_carrier_on(dev);
700 }
 701
702 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
703 /* If the link partner doesn't support autonegotiation
 704 * the MII detects its abilities with the "parallel detection".
705 * Some MIIs update the LPA register to the result of the parallel
706 * detection, some don't.
707 * The Davicom PHY [at least 0181b800] doesn't.
 708 * Instead, bits 9 and 13 of the BMCR are updated to the result
 709 * of the negotiation.
710 */
711 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
712 duplex = mii_reg & BMCR_FULLDPLX;
713 fasteth = mii_reg & BMCR_SPEED100;
714 } else {
715 int negotiated;
716 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
717 negotiated = mii_reg & np->mii_if.advertising;
718
719 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
720 fasteth = negotiated & 0x380;
721 }
722 duplex |= np->mii_if.force_media;
723 /* remove fastether and fullduplex */
724 result = np->csr6 & ~0x20000200;
725 if (duplex)
726 result |= 0x200;
727 if (fasteth)
728 result |= 0x20000000;
729 if (result != np->csr6 && debug)
730 dev_info(&dev->dev,
731 "Setting %dMBit-%s-duplex based on MII#%d\n",
732 fasteth ? 100 : 10, duplex ? "full" : "half",
733 np->phys[0]);
734 return result;
735}
736
737#define RXTX_TIMEOUT 2000
738static inline void update_csr6(struct net_device *dev, int new)
739{
740 struct netdev_private *np = netdev_priv(dev);
741 void __iomem *ioaddr = np->base_addr;
742 int limit = RXTX_TIMEOUT;
743
744 if (!netif_device_present(dev))
745 new = 0;
746 if (new==np->csr6)
747 return;
748 /* stop both Tx and Rx processes */
749 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
750 /* wait until they have really stopped */
751 for (;;) {
752 int csr5 = ioread32(ioaddr + IntrStatus);
753 int t;
754
755 t = (csr5 >> 17) & 0x07;
756 if (t==0||t==1) {
757 /* rx stopped */
758 t = (csr5 >> 20) & 0x07;
759 if (t==0||t==1)
760 break;
761 }
762
763 limit--;
764 if(!limit) {
765 dev_info(&dev->dev,
766 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
767 break;
768 }
769 udelay(1);
770 }
771 np->csr6 = new;
772 /* and restart them with the new configuration */
773 iowrite32(np->csr6, ioaddr + NetworkConfig);
774 if (new & 0x200)
775 np->mii_if.full_duplex = 1;
776}
777
778static void netdev_timer(unsigned long data)
779{
780 struct net_device *dev = (struct net_device *)data;
781 struct netdev_private *np = netdev_priv(dev);
782 void __iomem *ioaddr = np->base_addr;
783
784 if (debug > 2)
785 netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
786 ioread32(ioaddr + IntrStatus),
787 ioread32(ioaddr + NetworkConfig));
788 spin_lock_irq(&np->lock);
789 update_csr6(dev, update_link(dev));
790 spin_unlock_irq(&np->lock);
791 np->timer.expires = jiffies + 10*HZ;
792 add_timer(&np->timer);
793}
794
795static void init_rxtx_rings(struct net_device *dev)
796{
797 struct netdev_private *np = netdev_priv(dev);
798 int i;
799
800 np->rx_head_desc = &np->rx_ring[0];
801 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
802
 803 /* Initialize all Rx descriptors. */
804 for (i = 0; i < RX_RING_SIZE; i++) {
805 np->rx_ring[i].length = np->rx_buf_sz;
806 np->rx_ring[i].status = 0;
807 np->rx_skbuff[i] = NULL;
808 }
809 /* Mark the last entry as wrapping the ring. */
810 np->rx_ring[i-1].length |= DescEndRing;
811
812 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
813 for (i = 0; i < RX_RING_SIZE; i++) {
 814 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
815 np->rx_skbuff[i] = skb;
816 if (skb == NULL)
817 break;
 818 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
 819 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
820
821 np->rx_ring[i].buffer1 = np->rx_addr[i];
 822 np->rx_ring[i].status = DescOwned;
823 }
824
825 np->cur_rx = 0;
826 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
827
828 /* Initialize the Tx descriptors */
829 for (i = 0; i < TX_RING_SIZE; i++) {
830 np->tx_skbuff[i] = NULL;
831 np->tx_ring[i].status = 0;
832 }
833 np->tx_full = 0;
834 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
835
836 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
837 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
838 np->base_addr + TxRingPtr);
839
840}
841
842static void free_rxtx_rings(struct netdev_private* np)
843{
844 int i;
845 /* Free all the skbuffs in the Rx queue. */
846 for (i = 0; i < RX_RING_SIZE; i++) {
847 np->rx_ring[i].status = 0;
848 if (np->rx_skbuff[i]) {
849 pci_unmap_single(np->pci_dev,
850 np->rx_addr[i],
851 np->rx_skbuff[i]->len,
852 PCI_DMA_FROMDEVICE);
853 dev_kfree_skb(np->rx_skbuff[i]);
854 }
855 np->rx_skbuff[i] = NULL;
856 }
857 for (i = 0; i < TX_RING_SIZE; i++) {
858 if (np->tx_skbuff[i]) {
859 pci_unmap_single(np->pci_dev,
860 np->tx_addr[i],
861 np->tx_skbuff[i]->len,
862 PCI_DMA_TODEVICE);
863 dev_kfree_skb(np->tx_skbuff[i]);
864 }
865 np->tx_skbuff[i] = NULL;
866 }
867}
868
869static void init_registers(struct net_device *dev)
870{
871 struct netdev_private *np = netdev_priv(dev);
872 void __iomem *ioaddr = np->base_addr;
873 int i;
874
875 for (i = 0; i < 6; i++)
876 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
877
878 /* Initialize other registers. */
879#ifdef __BIG_ENDIAN
880 i = (1<<20); /* Big-endian descriptors */
881#else
882 i = 0;
883#endif
884 i |= (0x04<<2); /* skip length 4 u32 */
885 i |= 0x02; /* give Rx priority */
886
887 /* Configure the PCI bus bursts and FIFO thresholds.
888 486: Set 8 longword cache alignment, 8 longword burst.
889 586: Set 16 longword cache alignment, no burst limit.
890 Cache alignment bits 15:14 Burst length 13:8
891 0000 <not allowed> 0000 align to cache 0800 8 longwords
892 4000 8 longwords 0100 1 longword 1000 16 longwords
893 8000 16 longwords 0200 2 longwords 2000 32 longwords
894 C000 32 longwords 0400 4 longwords */
895
896#if defined (__i386__) && !defined(MODULE)
897 /* When not a module we can work around broken '486 PCI boards. */
898 if (boot_cpu_data.x86 <= 4) {
899 i |= 0x4800;
900 dev_info(&dev->dev,
901 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
902 } else {
903 i |= 0xE000;
904 }
905#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
906 i |= 0xE000;
 907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
908 i |= 0x4800;
909#else
 910 dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
911 i |= 0x4800;
912#endif
913 iowrite32(i, ioaddr + PCIBusCfg);
914
915 np->csr6 = 0;
 916 /* 128 byte Tx threshold;
917 Transmit on; Receive on; */
918 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
919
920 /* Clear and Enable interrupts by setting the interrupt mask. */
921 iowrite32(0x1A0F5, ioaddr + IntrStatus);
922 iowrite32(0x1A0F5, ioaddr + IntrEnable);
923
924 iowrite32(0, ioaddr + RxStartDemand);
925}
926
927static void tx_timeout(struct net_device *dev)
928{
929 struct netdev_private *np = netdev_priv(dev);
930 void __iomem *ioaddr = np->base_addr;
 931 const int irq = np->pci_dev->irq;
 932
933 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
934 ioread32(ioaddr + IntrStatus));
935
936 {
937 int i;
938 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
939 for (i = 0; i < RX_RING_SIZE; i++)
940 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
941 printk(KERN_CONT "\n");
942 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
 943 for (i = 0; i < TX_RING_SIZE; i++)
944 printk(KERN_CONT " %08x", np->tx_ring[i].status);
945 printk(KERN_CONT "\n");
 946 }
947 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
948 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
949 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
 950
 951 disable_irq(irq);
952 spin_lock_irq(&np->lock);
953 /*
954 * Under high load dirty_tx and the internal tx descriptor pointer
955 * come out of sync, thus perform a software reset and reinitialize
956 * everything.
957 */
958
959 iowrite32(1, np->base_addr+PCIBusCfg);
960 udelay(1);
961
962 free_rxtx_rings(np);
963 init_rxtx_rings(dev);
964 init_registers(dev);
965 spin_unlock_irq(&np->lock);
 966 enable_irq(irq);
967
968 netif_wake_queue(dev);
860e9538 969 netif_trans_update(dev); /* prevent tx timeout */
1da177e4 970 np->stats.tx_errors++;
1da177e4
LT
971}
972
973/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
974static int alloc_ringdesc(struct net_device *dev)
975{
976 struct netdev_private *np = netdev_priv(dev);
977
978 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
979
980 np->rx_ring = pci_alloc_consistent(np->pci_dev,
981 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
982 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
983 &np->ring_dma_addr);
984 if(!np->rx_ring)
985 return -ENOMEM;
986 init_rxtx_rings(dev);
987 return 0;
988}
989
990static void free_ringdesc(struct netdev_private *np)
991{
992 pci_free_consistent(np->pci_dev,
993 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
994 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
995 np->rx_ring, np->ring_dma_addr);
996
997}
998
 999static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1000{
1001 struct netdev_private *np = netdev_priv(dev);
1002 unsigned entry;
1003
1004 /* Caution: the write order is important here, set the field
1005 with the "ownership" bits last. */
1006
1007 /* Calculate the next Tx descriptor entry. */
1008 entry = np->cur_tx % TX_RING_SIZE;
1009
1010 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1011 skb->data,skb->len, PCI_DMA_TODEVICE);
1012 np->tx_skbuff[entry] = skb;
1013
1014 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1015 if (skb->len < TX_BUFLIMIT) {
1016 np->tx_ring[entry].length = DescWholePkt | skb->len;
1017 } else {
1018 int len = skb->len - TX_BUFLIMIT;
1019
1020 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1021 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1022 }
1023 if(entry == TX_RING_SIZE-1)
1024 np->tx_ring[entry].length |= DescEndRing;
1025
1026 /* Now acquire the irq spinlock.
 1027 * The difficult race is the ordering between
 1028 * increasing np->cur_tx and setting DescOwned:
1029 * - if np->cur_tx is increased first the interrupt
1030 * handler could consider the packet as transmitted
1031 * since DescOwned is cleared.
1032 * - If DescOwned is set first the NIC could report the
1033 * packet as sent, but the interrupt handler would ignore it
1034 * since the np->cur_tx was not yet increased.
1035 */
1036 spin_lock_irq(&np->lock);
1037 np->cur_tx++;
1038
1039 wmb(); /* flush length, buffer1, buffer2 */
 1040 np->tx_ring[entry].status = DescOwned;
1041 wmb(); /* flush status and kick the hardware */
1042 iowrite32(0, np->base_addr + TxStartDemand);
1043 np->tx_q_bytes += skb->len;
1044 /* Work around horrible bug in the chip by marking the queue as full
1045 when we do not have FIFO room for a maximum sized packet. */
1046 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1047 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1048 netif_stop_queue(dev);
1049 wmb();
1050 np->tx_full = 1;
1051 }
1052 spin_unlock_irq(&np->lock);
1053
 1054 if (debug > 4) {
1055 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1056 np->cur_tx, entry);
 1057 }
 1058 return NETDEV_TX_OK;
1059}
1060
1061static void netdev_tx_done(struct net_device *dev)
1062{
1063 struct netdev_private *np = netdev_priv(dev);
1064 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1065 int entry = np->dirty_tx % TX_RING_SIZE;
1066 int tx_status = np->tx_ring[entry].status;
1067
1068 if (tx_status < 0)
1069 break;
1070 if (tx_status & 0x8000) { /* There was an error, log it. */
1071#ifndef final_version
1072 if (debug > 1)
1073 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1074 tx_status);
1075#endif
1076 np->stats.tx_errors++;
1077 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1078 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1079 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1080 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1081 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1082 np->stats.tx_heartbeat_errors++;
1083 } else {
1084#ifndef final_version
1085 if (debug > 3)
1086 netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1087 entry, tx_status);
1088#endif
1089 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1090 np->stats.collisions += (tx_status >> 3) & 15;
1091 np->stats.tx_packets++;
1092 }
1093 /* Free the original skb. */
1094 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1095 np->tx_skbuff[entry]->len,
1096 PCI_DMA_TODEVICE);
1097 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1098 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1099 np->tx_skbuff[entry] = NULL;
1100 }
1101 if (np->tx_full &&
1102 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1103 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1104 /* The ring is no longer full, clear tbusy. */
1105 np->tx_full = 0;
1106 wmb();
1107 netif_wake_queue(dev);
1108 }
1109}
1110
1111/* The interrupt handler does all of the Rx thread work and cleans up
1112 after the Tx thread. */
 1113static irqreturn_t intr_handler(int irq, void *dev_instance)
1114{
1115 struct net_device *dev = (struct net_device *)dev_instance;
1116 struct netdev_private *np = netdev_priv(dev);
1117 void __iomem *ioaddr = np->base_addr;
1118 int work_limit = max_interrupt_work;
1119 int handled = 0;
1120
1121 if (!netif_device_present(dev))
1122 return IRQ_NONE;
1123 do {
1124 u32 intr_status = ioread32(ioaddr + IntrStatus);
1125
1126 /* Acknowledge all of the current interrupt sources ASAP. */
1127 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1128
1129 if (debug > 4)
 1130 netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1131
1132 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1133 break;
1134
1135 handled = 1;
1136
 1137 if (intr_status & (RxIntr | RxNoBuf))
1138 netdev_rx(dev);
1139 if (intr_status & RxNoBuf)
1140 iowrite32(0, ioaddr + RxStartDemand);
1141
 1142 if (intr_status & (TxNoBuf | TxIntr) &&
1143 np->cur_tx != np->dirty_tx) {
1144 spin_lock(&np->lock);
1145 netdev_tx_done(dev);
1146 spin_unlock(&np->lock);
1147 }
1148
1149 /* Abnormal error summary/uncommon events handlers. */
 1150 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
 1151 TimerInt | TxDied))
1152 netdev_error(dev, intr_status);
1153
1154 if (--work_limit < 0) {
1155 dev_warn(&dev->dev,
1156 "Too much work at interrupt, status=0x%04x\n",
1157 intr_status);
1158 /* Set the timer to re-enable the other interrupts after
1159 10*82usec ticks. */
1160 spin_lock(&np->lock);
1161 if (netif_device_present(dev)) {
1162 iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1163 iowrite32(10, ioaddr + GPTimer);
1164 }
1165 spin_unlock(&np->lock);
1166 break;
1167 }
1168 } while (1);
1169
1170 if (debug > 3)
1171 netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1172 ioread32(ioaddr + IntrStatus));
1173 return IRQ_RETVAL(handled);
1174}
1175
1176/* This routine is logically part of the interrupt handler, but separated
1177 for clarity and better register allocation. */
1178static int netdev_rx(struct net_device *dev)
1179{
1180 struct netdev_private *np = netdev_priv(dev);
1181 int entry = np->cur_rx % RX_RING_SIZE;
1182 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1183
1184 if (debug > 4) {
1185 netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1186 entry, np->rx_ring[entry].status);
1187 }
1188
1189 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1190 while (--work_limit >= 0) {
1191 struct w840_rx_desc *desc = np->rx_head_desc;
1192 s32 status = desc->status;
1193
1194 if (debug > 4)
1195 netdev_dbg(dev, " netdev_rx() status was %08x\n",
1196 status);
1197 if (status < 0)
1198 break;
1199 if ((status & 0x38008300) != 0x0300) {
1200 if ((status & 0x38000300) != 0x0300) {
 1201 /* Ignore earlier buffers. */
1202 if ((status & 0xffff) != 0x7fff) {
1203 dev_warn(&dev->dev,
1204 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1205 np->cur_rx, status);
1206 np->stats.rx_length_errors++;
1207 }
1208 } else if (status & 0x8000) {
1209 /* There was a fatal error. */
1210 if (debug > 2)
1211 netdev_dbg(dev, "Receive error, Rx status %08x\n",
1212 status);
1213 np->stats.rx_errors++; /* end of a packet.*/
1214 if (status & 0x0890) np->stats.rx_length_errors++;
1215 if (status & 0x004C) np->stats.rx_frame_errors++;
1216 if (status & 0x0002) np->stats.rx_crc_errors++;
1217 }
1218 } else {
1219 struct sk_buff *skb;
1220 /* Omit the four octet CRC from the length. */
1221 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1222
1223#ifndef final_version
1224 if (debug > 4)
1225 netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1226 pkt_len, status);
1227#endif
1228 /* Check if the packet is long enough to accept without copying
1229 to a minimally-sized skbuff. */
 1230 if (pkt_len < rx_copybreak &&
 1231 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1232 skb_reserve(skb, 2); /* 16 byte align the IP header */
1233 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1234 np->rx_skbuff[entry]->len,
1235 PCI_DMA_FROMDEVICE);
 1236 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1237 skb_put(skb, pkt_len);
1238 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1239 np->rx_skbuff[entry]->len,
1240 PCI_DMA_FROMDEVICE);
1241 } else {
1242 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1243 np->rx_skbuff[entry]->len,
1244 PCI_DMA_FROMDEVICE);
1245 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1246 np->rx_skbuff[entry] = NULL;
1247 }
1248#ifndef final_version /* Remove after testing. */
1249 /* You will want this info for the initial debug. */
 1250 if (debug > 5)
1251 netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1252 &skb->data[0], &skb->data[6],
1253 skb->data[12], skb->data[13],
1254 &skb->data[14]);
1255#endif
1256 skb->protocol = eth_type_trans(skb, dev);
1257 netif_rx(skb);
1258 np->stats.rx_packets++;
1259 np->stats.rx_bytes += pkt_len;
1260 }
1261 entry = (++np->cur_rx) % RX_RING_SIZE;
1262 np->rx_head_desc = &np->rx_ring[entry];
1263 }
1264
1265 /* Refill the Rx ring buffers. */
1266 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1267 struct sk_buff *skb;
1268 entry = np->dirty_rx % RX_RING_SIZE;
1269 if (np->rx_skbuff[entry] == NULL) {
 1270 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1271 np->rx_skbuff[entry] = skb;
1272 if (skb == NULL)
1273 break; /* Better luck next round. */
 1274 np->rx_addr[entry] = pci_map_single(np->pci_dev,
 1275 skb->data,
 1276 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1277 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1278 }
1279 wmb();
 1280 np->rx_ring[entry].status = DescOwned;
1281 }
1282
1283 return 0;
1284}
1285
1286static void netdev_error(struct net_device *dev, int intr_status)
1287{
1288 struct netdev_private *np = netdev_priv(dev);
1289 void __iomem *ioaddr = np->base_addr;
1290
1291 if (debug > 2)
 1292 netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1293 if (intr_status == 0xffffffff)
1294 return;
1295 spin_lock(&np->lock);
1296 if (intr_status & TxFIFOUnderflow) {
1297 int new;
1298 /* Bump up the Tx threshold */
1299#if 0
1300 /* This causes lots of dropped packets,
1301 * and under high load even tx_timeouts
1302 */
1303 new = np->csr6 + 0x4000;
1304#else
1305 new = (np->csr6 >> 14)&0x7f;
1306 if (new < 64)
1307 new *= 2;
1308 else
1309 new = 127; /* load full packet before starting */
1310 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1311#endif
 1312 netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1313 update_csr6(dev, new);
1314 }
 1315 if (intr_status & RxDied) { /* Missed a Rx frame. */
1316 np->stats.rx_errors++;
1317 }
1318 if (intr_status & TimerInt) {
1319 /* Re-enable other interrupts. */
1320 if (netif_device_present(dev))
1321 iowrite32(0x1A0F5, ioaddr + IntrEnable);
1322 }
1323 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1324 iowrite32(0, ioaddr + RxStartDemand);
1325 spin_unlock(&np->lock);
1326}
1327
1328static struct net_device_stats *get_stats(struct net_device *dev)
1329{
1330 struct netdev_private *np = netdev_priv(dev);
1331 void __iomem *ioaddr = np->base_addr;
1332
 1333 /* The chip only needs to report frames it silently dropped. */
1334 spin_lock_irq(&np->lock);
1335 if (netif_running(dev) && netif_device_present(dev))
1336 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1337 spin_unlock_irq(&np->lock);
1338
1339 return &np->stats;
1340}
1341
1342
1343static u32 __set_rx_mode(struct net_device *dev)
1344{
1345 struct netdev_private *np = netdev_priv(dev);
1346 void __iomem *ioaddr = np->base_addr;
1347 u32 mc_filter[2]; /* Multicast hash filter */
1348 u32 rx_mode;
1349
1350 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
 1351 memset(mc_filter, 0xff, sizeof(mc_filter));
 1352 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
 1353 | AcceptMyPhys;
 1354 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 1355 (dev->flags & IFF_ALLMULTI)) {
1356 /* Too many to match, or accept all multicasts. */
1357 memset(mc_filter, 0xff, sizeof(mc_filter));
 1358 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 1359 } else {
 1360 struct netdev_hw_addr *ha;
 1361
 1362 memset(mc_filter, 0, sizeof(mc_filter));
1363 netdev_for_each_mc_addr(ha, dev) {
1364 int filbit;
1365
1366 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1367 filbit &= 0x3f;
1368 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
 1369 }
 1370 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1371 }
1372 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1373 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1374 return rx_mode;
1375}
1376
1377static void set_rx_mode(struct net_device *dev)
1378{
1379 struct netdev_private *np = netdev_priv(dev);
1380 u32 rx_mode = __set_rx_mode(dev);
1381 spin_lock_irq(&np->lock);
1382 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1383 spin_unlock_irq(&np->lock);
1384}
1385
1386static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1387{
1388 struct netdev_private *np = netdev_priv(dev);
1389
1390 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1391 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1392 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1393}
1394
1395static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1396{
1397 struct netdev_private *np = netdev_priv(dev);
1398 int rc;
1399
1400 spin_lock_irq(&np->lock);
1401 rc = mii_ethtool_gset(&np->mii_if, cmd);
1402 spin_unlock_irq(&np->lock);
1403
1404 return rc;
1405}
1406
1407static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1408{
1409 struct netdev_private *np = netdev_priv(dev);
1410 int rc;
1411
1412 spin_lock_irq(&np->lock);
1413 rc = mii_ethtool_sset(&np->mii_if, cmd);
1414 spin_unlock_irq(&np->lock);
1415
1416 return rc;
1417}
1418
1419static int netdev_nway_reset(struct net_device *dev)
1420{
1421 struct netdev_private *np = netdev_priv(dev);
1422 return mii_nway_restart(&np->mii_if);
1423}
1424
1425static u32 netdev_get_link(struct net_device *dev)
1426{
1427 struct netdev_private *np = netdev_priv(dev);
1428 return mii_link_ok(&np->mii_if);
1429}
1430
1431static u32 netdev_get_msglevel(struct net_device *dev)
1432{
1433 return debug;
1434}
1435
1436static void netdev_set_msglevel(struct net_device *dev, u32 value)
1437{
1438 debug = value;
1439}
1440
 1441static const struct ethtool_ops netdev_ethtool_ops = {
1442 .get_drvinfo = netdev_get_drvinfo,
1443 .get_settings = netdev_get_settings,
1444 .set_settings = netdev_set_settings,
1445 .nway_reset = netdev_nway_reset,
1446 .get_link = netdev_get_link,
1447 .get_msglevel = netdev_get_msglevel,
1448 .set_msglevel = netdev_set_msglevel,
1449};
1450
1451static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1452{
1453 struct mii_ioctl_data *data = if_mii(rq);
1454 struct netdev_private *np = netdev_priv(dev);
1455
1456 switch(cmd) {
1457 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1458 data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1459 /* Fall Through */
1460
1461 case SIOCGMIIREG: /* Read MII PHY register. */
1462 spin_lock_irq(&np->lock);
1463 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1464 spin_unlock_irq(&np->lock);
1465 return 0;
1466
1467 case SIOCSMIIREG: /* Write MII PHY register. */
1468 spin_lock_irq(&np->lock);
1469 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1470 spin_unlock_irq(&np->lock);
1471 return 0;
1472 default:
1473 return -EOPNOTSUPP;
1474 }
1475}
1476
1477static int netdev_close(struct net_device *dev)
1478{
1479 struct netdev_private *np = netdev_priv(dev);
1480 void __iomem *ioaddr = np->base_addr;
1481
1482 netif_stop_queue(dev);
1483
1484 if (debug > 1) {
1485 netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1486 ioread32(ioaddr + IntrStatus),
1487 ioread32(ioaddr + NetworkConfig));
1488 netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1489 np->cur_tx, np->dirty_tx,
1490 np->cur_rx, np->dirty_rx);
1491 }
1492
1493 /* Stop the chip's Tx and Rx processes. */
1494 spin_lock_irq(&np->lock);
1495 netif_device_detach(dev);
1496 update_csr6(dev, 0);
1497 iowrite32(0x0000, ioaddr + IntrEnable);
1498 spin_unlock_irq(&np->lock);
1499
 1500 free_irq(np->pci_dev->irq, dev);
1501 wmb();
1502 netif_device_attach(dev);
1503
1504 if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1505 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1506
1507#ifdef __i386__
1508 if (debug > 2) {
1509 int i;
1510
 1511 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
 1512 for (i = 0; i < TX_RING_SIZE; i++)
1513 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1514 i, np->tx_ring[i].length,
1515 np->tx_ring[i].status, np->tx_ring[i].buffer1);
 1516 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
 1517 for (i = 0; i < RX_RING_SIZE; i++) {
1518 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1519 i, np->rx_ring[i].length,
1520 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1521 }
1522 }
1523#endif /* __i386__ debugging only */
1524
1525 del_timer_sync(&np->timer);
1526
1527 free_rxtx_rings(np);
1528 free_ringdesc(np);
1529
1530 return 0;
1531}
1532
 1533static void w840_remove1(struct pci_dev *pdev)
1534{
1535 struct net_device *dev = pci_get_drvdata(pdev);
 1536
1537 if (dev) {
1538 struct netdev_private *np = netdev_priv(dev);
1539 unregister_netdev(dev);
1540 pci_release_regions(pdev);
1541 pci_iounmap(pdev, np->base_addr);
1542 free_netdev(dev);
1543 }
1544}
1545
1546#ifdef CONFIG_PM
1547
1548/*
1549 * suspend/resume synchronization:
1550 * - open, close, do_ioctl:
1551 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1552 * - get_stats:
1553 * spin_lock_irq(np->lock), doesn't touch hw if not present
 1554 * - start_xmit:
 1555 * synchronize_irq + netif_tx_disable;
 1556 * - tx_timeout:
 1557 * netif_device_detach + netif_tx_disable;
 1558 * - set_multicast_list
 1559 * netif_device_detach + netif_tx_disable;
1560 * - interrupt handler
1561 * doesn't touch hw if not present, synchronize_irq waits for
1562 * running instances of the interrupt handler.
1563 *
1564 * Disabling hw requires clearing csr6 & IntrEnable.
 1565 * update_csr6 & all functions that write IntrEnable check netif_device_present
 1566 * before setting any bits.
1567 *
 1568 * Detach must occur under spin_unlock_irq(); interrupts from a detached
1569 * device would cause an irq storm.
1570 */
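/* Sketch of the detach pattern described above, as used by w840_suspend()
 * below: the hardware is disabled and the device detached while holding the
 * lock, so no interrupt can observe a half-disabled chip.
 *
 *	spin_lock_irq(&np->lock);
 *	netif_device_detach(dev);
 *	update_csr6(dev, 0);
 *	iowrite32(0, ioaddr + IntrEnable);
 *	spin_unlock_irq(&np->lock);
 */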
 1571static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1572{
1573 struct net_device *dev = pci_get_drvdata (pdev);
1574 struct netdev_private *np = netdev_priv(dev);
1575 void __iomem *ioaddr = np->base_addr;
1576
1577 rtnl_lock();
1578 if (netif_running (dev)) {
1579 del_timer_sync(&np->timer);
1580
1581 spin_lock_irq(&np->lock);
1582 netif_device_detach(dev);
1583 update_csr6(dev, 0);
1584 iowrite32(0, ioaddr + IntrEnable);
1585 spin_unlock_irq(&np->lock);
1586
 1587 synchronize_irq(np->pci_dev->irq);
 1588 netif_tx_disable(dev);
 1589
1590 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1591
1592 /* no more hardware accesses behind this line. */
1593
 1594 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1595
1596 /* pci_power_off(pdev, -1); */
1597
1598 free_rxtx_rings(np);
1599 } else {
1600 netif_device_detach(dev);
1601 }
1602 rtnl_unlock();
1603 return 0;
1604}
1605
1606static int w840_resume (struct pci_dev *pdev)
1607{
1608 struct net_device *dev = pci_get_drvdata (pdev);
1609 struct netdev_private *np = netdev_priv(dev);
 1610 int retval = 0;
1611
1612 rtnl_lock();
1613 if (netif_device_present(dev))
1614 goto out; /* device not suspended */
1615 if (netif_running(dev)) {
 1616 if ((retval = pci_enable_device(pdev))) {
1617 dev_err(&dev->dev,
1618 "pci_enable_device failed in resume\n");
1619 goto out;
1620 }
1621 spin_lock_irq(&np->lock);
1622 iowrite32(1, np->base_addr+PCIBusCfg);
1623 ioread32(np->base_addr+PCIBusCfg);
1624 udelay(1);
1625 netif_device_attach(dev);
1626 init_rxtx_rings(dev);
1627 init_registers(dev);
1628 spin_unlock_irq(&np->lock);
1629
1630 netif_wake_queue(dev);
1631
1632 mod_timer(&np->timer, jiffies + 1*HZ);
1633 } else {
1634 netif_device_attach(dev);
1635 }
1636out:
1637 rtnl_unlock();
 1638 return retval;
1639}
1640#endif
1641
1642static struct pci_driver w840_driver = {
1643 .name = DRV_NAME,
1644 .id_table = w840_pci_tbl,
1645 .probe = w840_probe1,
 1646 .remove = w840_remove1,
1647#ifdef CONFIG_PM
1648 .suspend = w840_suspend,
1649 .resume = w840_resume,
1650#endif
1651};
1652
1653static int __init w840_init(void)
1654{
1655 printk(version);
 1656 return pci_register_driver(&w840_driver);
1657}
1658
1659static void __exit w840_exit(void)
1660{
1661 pci_unregister_driver(&w840_driver);
1662}
1663
1664module_init(w840_init);
1665module_exit(w840_exit);