8139cp: use PCI_DEVICE() to shorten the PCI device table
[linux-2.6-block.git] / drivers / net / 8139cp.c
1/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2/*
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
18
19 See the file COPYING in this distribution for more information.
20
21 Contributors:
22
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
26
27 TODO:
28 * Test Tx checksumming thoroughly
29 * Implement dev->tx_timeout
30
31 Low priority TODO:
32 * Complete reset on PciErr
33 * Consider Rx interrupt mitigation using TimerIntr
34 * Investigate using skb->priority with h/w VLAN priority
35 * Investigate using High Priority Tx Queue with skb->priority
36 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
37 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
38 * Implement Tx software interrupt mitigation via
39 Tx descriptor bit
40 * The real minimum of CP_MIN_MTU is 4 bytes. However,
41 for this to be supported, one must(?) turn on packet padding.
42 * Support external MII transceivers (patch available)
43
44 NOTES:
45 * TX checksumming is considered experimental. It is off by
46 default; use ethtool to turn it on.
47
48 */
49
50#define DRV_NAME "8139cp"
51#define DRV_VERSION "1.2"
52#define DRV_RELDATE "Mar 22, 2004"
53
54
55#include <linux/module.h>
56#include <linux/moduleparam.h>
57#include <linux/kernel.h>
58#include <linux/compiler.h>
59#include <linux/netdevice.h>
60#include <linux/etherdevice.h>
61#include <linux/init.h>
62#include <linux/pci.h>
63#include <linux/dma-mapping.h>
64#include <linux/delay.h>
65#include <linux/ethtool.h>
66#include <linux/mii.h>
67#include <linux/if_vlan.h>
68#include <linux/crc32.h>
69#include <linux/in.h>
70#include <linux/ip.h>
71#include <linux/tcp.h>
72#include <linux/udp.h>
73#include <linux/cache.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/uaccess.h>
77
78/* VLAN tagging feature enable/disable */
79#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
80#define CP_VLAN_TAG_USED 1
81#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
82 do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
83#else
84#define CP_VLAN_TAG_USED 0
85#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
86 do { (tx_desc)->opts2 = 0; } while (0)
87#endif
88
89/* These identify the driver base version and may not be removed. */
90static char version[] =
91KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
92
93MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
94MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
95MODULE_VERSION(DRV_VERSION);
96MODULE_LICENSE("GPL");
97
98static int debug = -1;
99module_param(debug, int, 0);
100MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
101
102/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
103 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
104static int multicast_filter_limit = 32;
105module_param(multicast_filter_limit, int, 0);
106MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
107
108#define PFX DRV_NAME ": "
109
110#ifndef TRUE
111#define FALSE 0
112#define TRUE (!FALSE)
113#endif
114
115#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
116 NETIF_MSG_PROBE | \
117 NETIF_MSG_LINK)
118#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
119#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
120#define CP_REGS_SIZE (0xff + 1)
121#define CP_REGS_VER 1 /* version 1 */
122#define CP_RX_RING_SIZE 64
123#define CP_TX_RING_SIZE 64
124#define CP_RING_BYTES \
125 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
126 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
127 CP_STATS_SIZE)
128#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
129#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
130#define TX_BUFFS_AVAIL(CP) \
131 (((CP)->tx_tail <= (CP)->tx_head) ? \
132 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
133 (CP)->tx_tail - (CP)->tx_head - 1)
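/* TX_BUFFS_AVAIL() reports how many Tx descriptors are free while always
 * keeping one slot unused, so a completely full ring can be told apart from
 * an empty one.  For example, tx_head == tx_tail (empty ring) yields
 * CP_TX_RING_SIZE - 1 == 63 free slots; tx_head == 10, tx_tail == 5 means
 * five descriptors are in flight and 58 slots remain free. */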
134
135#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
136#define RX_OFFSET 2
137#define CP_INTERNAL_PHY 32
138
139/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
140#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
141#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
142#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
143#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
144
145/* Time in jiffies before concluding the transmitter is hung. */
146#define TX_TIMEOUT (6*HZ)
147
148/* hardware minimum and maximum for a single frame's data payload */
149#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
150#define CP_MAX_MTU 4096
151
152enum {
153 /* NIC register offsets */
154 MAC0 = 0x00, /* Ethernet hardware address. */
155 MAR0 = 0x08, /* Multicast filter. */
156 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
157 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
158 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
159 Cmd = 0x37, /* Command register */
160 IntrMask = 0x3C, /* Interrupt mask */
161 IntrStatus = 0x3E, /* Interrupt status */
162 TxConfig = 0x40, /* Tx configuration */
163 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
164 RxConfig = 0x44, /* Rx configuration */
165 RxMissed = 0x4C, /* 24 bits valid, write clears */
166 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
167 Config1 = 0x52, /* Config1 */
168 Config3 = 0x59, /* Config3 */
169 Config4 = 0x5A, /* Config4 */
170 MultiIntr = 0x5C, /* Multiple interrupt select */
171 BasicModeCtrl = 0x62, /* MII BMCR */
172 BasicModeStatus = 0x64, /* MII BMSR */
173 NWayAdvert = 0x66, /* MII ADVERTISE */
174 NWayLPAR = 0x68, /* MII LPA */
175 NWayExpansion = 0x6A, /* MII Expansion */
176 Config5 = 0xD8, /* Config5 */
177 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
178 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
179 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
180 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
181 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
182 TxThresh = 0xEC, /* Early Tx threshold */
183 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
184 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
185
186 /* Tx and Rx status descriptors */
187 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
188 RingEnd = (1 << 30), /* End of descriptor ring */
189 FirstFrag = (1 << 29), /* First segment of a packet */
190 LastFrag = (1 << 28), /* Final segment of a packet */
191 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
192 MSSShift = 16, /* MSS value position */
193 MSSMask = 0xfff, /* MSS value: 11 bits */
194 TxError = (1 << 23), /* Tx error summary */
195 RxError = (1 << 20), /* Rx error summary */
196 IPCS = (1 << 18), /* Calculate IP checksum */
197 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
198 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
199 TxVlanTag = (1 << 17), /* Add VLAN tag */
200 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
201 IPFail = (1 << 15), /* IP checksum failed */
202 UDPFail = (1 << 14), /* UDP/IP checksum failed */
203 TCPFail = (1 << 13), /* TCP/IP checksum failed */
204 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
205 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
206 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
207 RxProtoTCP = 1,
208 RxProtoUDP = 2,
209 RxProtoIP = 3,
210 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
211 TxOWC = (1 << 22), /* Tx Out-of-window collision */
212 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
213 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
214 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
215 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
216 RxErrFrame = (1 << 27), /* Rx frame alignment error */
217 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
218 RxErrCRC = (1 << 18), /* Rx CRC error */
219 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
220 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
221 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
222
223 /* StatsAddr register */
224 DumpStats = (1 << 3), /* Begin stats dump */
225
226 /* RxConfig register */
227 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
228 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
229 AcceptErr = 0x20, /* Accept packets with CRC errors */
230 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
231 AcceptBroadcast = 0x08, /* Accept broadcast packets */
232 AcceptMulticast = 0x04, /* Accept multicast packets */
233 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
234 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
235
236 /* IntrMask / IntrStatus registers */
237 PciErr = (1 << 15), /* System error on the PCI bus */
238 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
239 LenChg = (1 << 13), /* Cable length change */
240 SWInt = (1 << 8), /* Software-requested interrupt */
241 TxEmpty = (1 << 7), /* No Tx descriptors available */
242 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
243 LinkChg = (1 << 5), /* Packet underrun, or link change */
244 RxEmpty = (1 << 4), /* No Rx descriptors available */
245 TxErr = (1 << 3), /* Tx error */
246 TxOK = (1 << 2), /* Tx packet sent */
247 RxErr = (1 << 1), /* Rx error */
248 RxOK = (1 << 0), /* Rx packet received */
249 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
250 but hardware likes to raise it */
251
252 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
253 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
254 RxErr | RxOK | IntrResvd,
255
256 /* C mode command register */
257 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
258 RxOn = (1 << 3), /* Rx mode enable */
259 TxOn = (1 << 2), /* Tx mode enable */
260
261 /* C+ mode command register */
262 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
263 RxChkSum = (1 << 5), /* Rx checksum offload enable */
264 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
265 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
266 CpRxOn = (1 << 1), /* Rx mode enable */
267 CpTxOn = (1 << 0), /* Tx mode enable */
268
269 /* Cfg9346 EEPROM control register */
270 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
271 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
272
273 /* TxConfig register */
274 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
275 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
276
277 /* Early Tx Threshold register */
278 TxThreshMask = 0x3f, /* Mask bits 5-0 */
279 TxThreshMax = 2048, /* Max early Tx threshold */
280
281 /* Config1 register */
282 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
283 LWACT = (1 << 4), /* LWAKE active mode */
284 PMEnable = (1 << 0), /* Enable various PM features of chip */
285
286 /* Config3 register */
287 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
288 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
289 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
290
291 /* Config4 register */
292 LWPTN = (1 << 1), /* LWAKE Pattern */
293 LWPME = (1 << 4), /* LANWAKE vs PMEB */
294
295 /* Config5 register */
296 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
297 MWF = (1 << 5), /* Accept Multicast wakeup frame */
298 UWF = (1 << 4), /* Accept Unicast wakeup frame */
299 LANWake = (1 << 1), /* Enable LANWake signal */
300 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
301
302 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
303 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
304 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
305};
306
307static const unsigned int cp_rx_config =
308 (RX_FIFO_THRESH << RxCfgFIFOShift) |
309 (RX_DMA_BURST << RxCfgDMAShift);
310
311struct cp_desc {
312 u32 opts1;
313 u32 opts2;
314 u64 addr;
315};
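/* Each cp_desc is a little-endian descriptor shared with the NIC: opts1
 * carries the DescOwn/FirstFrag/LastFrag/RingEnd bits plus status or
 * checksum-offload flags and the buffer length, opts2 carries the VLAN tag
 * when VLAN acceleration is in use, and addr is the 64-bit DMA address of
 * the buffer -- hence the cpu_to_le32()/cpu_to_le64() conversions wherever
 * these fields are touched. */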
316
317struct ring_info {
318 struct sk_buff *skb;
319 u32 len;
320};
321
322struct cp_dma_stats {
323 u64 tx_ok;
324 u64 rx_ok;
325 u64 tx_err;
326 u32 rx_err;
327 u16 rx_fifo;
328 u16 frame_align;
329 u32 tx_ok_1col;
330 u32 tx_ok_mcol;
331 u64 rx_ok_phys;
332 u64 rx_ok_bcast;
333 u32 rx_ok_mcast;
334 u16 tx_abort;
335 u16 tx_underrun;
336} __attribute__((packed));
337
338struct cp_extra_stats {
339 unsigned long rx_frags;
340};
341
342struct cp_private {
343 void __iomem *regs;
344 struct net_device *dev;
345 spinlock_t lock;
346 u32 msg_enable;
347
348 struct pci_dev *pdev;
349 u32 rx_config;
350 u16 cpcmd;
351
352 struct net_device_stats net_stats;
353 struct cp_extra_stats cp_stats;
354
355 unsigned rx_head ____cacheline_aligned;
356 unsigned rx_tail;
357 struct cp_desc *rx_ring;
358 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
359
360 unsigned tx_head ____cacheline_aligned;
361 unsigned tx_tail;
362 struct cp_desc *tx_ring;
363 struct ring_info tx_skb[CP_TX_RING_SIZE];
364
365 unsigned rx_buf_sz;
366 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
367
368#if CP_VLAN_TAG_USED
369 struct vlan_group *vlgrp;
370#endif
371 dma_addr_t ring_dma;
372
373 struct mii_if_info mii_if;
374};
375
376#define cpr8(reg) readb(cp->regs + (reg))
377#define cpr16(reg) readw(cp->regs + (reg))
378#define cpr32(reg) readl(cp->regs + (reg))
379#define cpw8(reg,val) writeb((val), cp->regs + (reg))
380#define cpw16(reg,val) writew((val), cp->regs + (reg))
381#define cpw32(reg,val) writel((val), cp->regs + (reg))
382#define cpw8_f(reg,val) do { \
383 writeb((val), cp->regs + (reg)); \
384 readb(cp->regs + (reg)); \
385 } while (0)
386#define cpw16_f(reg,val) do { \
387 writew((val), cp->regs + (reg)); \
388 readw(cp->regs + (reg)); \
389 } while (0)
390#define cpw32_f(reg,val) do { \
391 writel((val), cp->regs + (reg)); \
392 readl(cp->regs + (reg)); \
393 } while (0)
394
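/* The cpw*_f() variants immediately read the register back after writing.
 * The dummy read forces any posted MMIO write out to the chip, so the
 * caller knows the write has actually reached the hardware before
 * continuing with ordering-sensitive register pokes. */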
395
396static void __cp_set_rx_mode (struct net_device *dev);
397static void cp_tx (struct cp_private *cp);
398static void cp_clean_rings (struct cp_private *cp);
399#ifdef CONFIG_NET_POLL_CONTROLLER
400static void cp_poll_controller(struct net_device *dev);
401#endif
402static int cp_get_eeprom_len(struct net_device *dev);
403static int cp_get_eeprom(struct net_device *dev,
404 struct ethtool_eeprom *eeprom, u8 *data);
405static int cp_set_eeprom(struct net_device *dev,
406 struct ethtool_eeprom *eeprom, u8 *data);
407
408static struct pci_device_id cp_pci_tbl[] = {
409 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
410 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
411 { },
412};
413MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
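/* PCI_DEVICE() -- the helper this commit switches the table to -- matches a
 * vendor/device pair and wildcards the subsystem IDs.  It expands roughly to:
 *
 *	.vendor = (vend), .device = (dev),
 *	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 *
 * which is why the previously open-coded table entries could be shortened
 * to one line each. */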
414
415static struct {
416 const char str[ETH_GSTRING_LEN];
417} ethtool_stats_keys[] = {
418 { "tx_ok" },
419 { "rx_ok" },
420 { "tx_err" },
421 { "rx_err" },
422 { "rx_fifo" },
423 { "frame_align" },
424 { "tx_ok_1col" },
425 { "tx_ok_mcol" },
426 { "rx_ok_phys" },
427 { "rx_ok_bcast" },
428 { "rx_ok_mcast" },
429 { "tx_abort" },
430 { "tx_underrun" },
431 { "rx_frags" },
432};
433
434
435#if CP_VLAN_TAG_USED
436static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
437{
438 struct cp_private *cp = netdev_priv(dev);
439 unsigned long flags;
440
441 spin_lock_irqsave(&cp->lock, flags);
442 cp->vlgrp = grp;
443 cp->cpcmd |= RxVlanOn;
444 cpw16(CpCmd, cp->cpcmd);
445 spin_unlock_irqrestore(&cp->lock, flags);
446}
447
448static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
449{
450 struct cp_private *cp = netdev_priv(dev);
451 unsigned long flags;
452
453 spin_lock_irqsave(&cp->lock, flags);
454 cp->cpcmd &= ~RxVlanOn;
455 cpw16(CpCmd, cp->cpcmd);
456 if (cp->vlgrp)
457 cp->vlgrp->vlan_devices[vid] = NULL;
458 spin_unlock_irqrestore(&cp->lock, flags);
459}
460#endif /* CP_VLAN_TAG_USED */
461
462static inline void cp_set_rxbufsize (struct cp_private *cp)
463{
464 unsigned int mtu = cp->dev->mtu;
465
466 if (mtu > ETH_DATA_LEN)
467 /* MTU + ethernet header + FCS + optional VLAN tag */
468 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
469 else
470 cp->rx_buf_sz = PKT_BUF_SZ;
471}
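/* For jumbo MTUs the buffer is sized as MTU + ETH_HLEN (14) + 8, where the
 * extra 8 bytes presumably cover the 4-byte FCS plus a 4-byte 802.1Q VLAN
 * tag, per the comment above; standard MTUs simply use the fixed
 * PKT_BUF_SZ (1536) buffer. */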
472
473static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
474 struct cp_desc *desc)
475{
476 skb->protocol = eth_type_trans (skb, cp->dev);
477
478 cp->net_stats.rx_packets++;
479 cp->net_stats.rx_bytes += skb->len;
480 cp->dev->last_rx = jiffies;
481
482#if CP_VLAN_TAG_USED
483 if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
484 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
485 be16_to_cpu(desc->opts2 & 0xffff));
486 } else
487#endif
488 netif_receive_skb(skb);
489}
490
491static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
492 u32 status, u32 len)
493{
494 if (netif_msg_rx_err (cp))
495 printk (KERN_DEBUG
496 "%s: rx err, slot %d status 0x%x len %d\n",
497 cp->dev->name, rx_tail, status, len);
498 cp->net_stats.rx_errors++;
499 if (status & RxErrFrame)
500 cp->net_stats.rx_frame_errors++;
501 if (status & RxErrCRC)
502 cp->net_stats.rx_crc_errors++;
503 if ((status & RxErrRunt) || (status & RxErrLong))
504 cp->net_stats.rx_length_errors++;
505 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
506 cp->net_stats.rx_length_errors++;
507 if (status & RxErrFIFO)
508 cp->net_stats.rx_fifo_errors++;
509}
510
511static inline unsigned int cp_rx_csum_ok (u32 status)
512{
513 unsigned int protocol = (status >> 16) & 0x3;
514
515 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
516 return 1;
517 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
518 return 1;
519 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
520 return 1;
521 return 0;
522}
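/* cp_rx_csum_ok() looks at the two protocol-ID bits (PID1:PID0, status
 * bits 17:16) decoded by the chip and trusts the hardware checksum only
 * when the matching TCPFail/UDPFail/IPFail bit is clear; anything else
 * falls back to software checksumming (CHECKSUM_NONE). */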
523
524static int cp_rx_poll (struct net_device *dev, int *budget)
525{
526 struct cp_private *cp = netdev_priv(dev);
527 unsigned rx_tail = cp->rx_tail;
528 unsigned rx_work = dev->quota;
529 unsigned rx;
530
531rx_status_loop:
532 rx = 0;
533 cpw16(IntrStatus, cp_rx_intr_mask);
534
535 while (1) {
536 u32 status, len;
537 dma_addr_t mapping;
538 struct sk_buff *skb, *new_skb;
539 struct cp_desc *desc;
540 unsigned buflen;
541
542 skb = cp->rx_skb[rx_tail];
543 BUG_ON(!skb);
544
545 desc = &cp->rx_ring[rx_tail];
546 status = le32_to_cpu(desc->opts1);
547 if (status & DescOwn)
548 break;
549
550 len = (status & 0x1fff) - 4;
551 mapping = le64_to_cpu(desc->addr);
552
553 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
554 /* we don't support incoming fragmented frames.
555 * instead, we attempt to ensure that the
556 * pre-allocated RX skbs are properly sized such
557 * that RX fragments are never encountered
558 */
559 cp_rx_err_acct(cp, rx_tail, status, len);
560 cp->net_stats.rx_dropped++;
561 cp->cp_stats.rx_frags++;
562 goto rx_next;
563 }
564
565 if (status & (RxError | RxErrFIFO)) {
566 cp_rx_err_acct(cp, rx_tail, status, len);
567 goto rx_next;
568 }
569
570 if (netif_msg_rx_status(cp))
571 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
572 dev->name, rx_tail, status, len);
573
574 buflen = cp->rx_buf_sz + RX_OFFSET;
575 new_skb = dev_alloc_skb (buflen);
576 if (!new_skb) {
577 cp->net_stats.rx_dropped++;
578 goto rx_next;
579 }
580
581 skb_reserve(new_skb, RX_OFFSET);
582 new_skb->dev = dev;
583
584 pci_unmap_single(cp->pdev, mapping,
585 buflen, PCI_DMA_FROMDEVICE);
586
587 /* Handle checksum offloading for incoming packets. */
588 if (cp_rx_csum_ok(status))
589 skb->ip_summed = CHECKSUM_UNNECESSARY;
590 else
591 skb->ip_summed = CHECKSUM_NONE;
592
593 skb_put(skb, len);
594
595 mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
596 PCI_DMA_FROMDEVICE);
597 cp->rx_skb[rx_tail] = new_skb;
598
599 cp_rx_skb(cp, skb, desc);
600 rx++;
601
602rx_next:
603 cp->rx_ring[rx_tail].opts2 = 0;
604 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
605 if (rx_tail == (CP_RX_RING_SIZE - 1))
606 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
607 cp->rx_buf_sz);
608 else
609 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
610 rx_tail = NEXT_RX(rx_tail);
611
612 if (!rx_work--)
613 break;
614 }
615
616 cp->rx_tail = rx_tail;
617
618 dev->quota -= rx;
619 *budget -= rx;
620
621 /* if we did not reach work limit, then we're done with
622 * this round of polling
623 */
624 if (rx_work) {
625 if (cpr16(IntrStatus) & cp_rx_intr_mask)
626 goto rx_status_loop;
627
628 local_irq_disable();
629 cpw16_f(IntrMask, cp_intr_mask);
630 __netif_rx_complete(dev);
631 local_irq_enable();
632
633 return 0; /* done */
634 }
635
636 return 1; /* not done */
637}
638
639static irqreturn_t
640cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
641{
642 struct net_device *dev = dev_instance;
643 struct cp_private *cp;
644 u16 status;
645
646 if (unlikely(dev == NULL))
647 return IRQ_NONE;
648 cp = netdev_priv(dev);
649
650 status = cpr16(IntrStatus);
651 if (!status || (status == 0xFFFF))
652 return IRQ_NONE;
653
654 if (netif_msg_intr(cp))
655 printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
656 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
657
658 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
659
660 spin_lock(&cp->lock);
661
662 /* close possible races with dev_close */
663 if (unlikely(!netif_running(dev))) {
664 cpw16(IntrMask, 0);
665 spin_unlock(&cp->lock);
666 return IRQ_HANDLED;
667 }
668
669 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
670 if (netif_rx_schedule_prep(dev)) {
671 cpw16_f(IntrMask, cp_norx_intr_mask);
672 __netif_rx_schedule(dev);
673 }
674
675 if (status & (TxOK | TxErr | TxEmpty | SWInt))
676 cp_tx(cp);
677 if (status & LinkChg)
678 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
679
680 spin_unlock(&cp->lock);
681
682 if (status & PciErr) {
683 u16 pci_status;
684
685 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
686 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
687 printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
688 dev->name, status, pci_status);
689
690 /* TODO: reset hardware */
691 }
692
693 return IRQ_HANDLED;
694}
695
696#ifdef CONFIG_NET_POLL_CONTROLLER
697/*
698 * Polling receive - used by netconsole and other diagnostic tools
699 * to allow network i/o with interrupts disabled.
700 */
701static void cp_poll_controller(struct net_device *dev)
702{
703 disable_irq(dev->irq);
704 cp_interrupt(dev->irq, dev, NULL);
705 enable_irq(dev->irq);
706}
707#endif
708
709static void cp_tx (struct cp_private *cp)
710{
711 unsigned tx_head = cp->tx_head;
712 unsigned tx_tail = cp->tx_tail;
713
714 while (tx_tail != tx_head) {
715 struct cp_desc *txd = cp->tx_ring + tx_tail;
716 struct sk_buff *skb;
717 u32 status;
718
719 rmb();
720 status = le32_to_cpu(txd->opts1);
721 if (status & DescOwn)
722 break;
723
724 skb = cp->tx_skb[tx_tail].skb;
725 BUG_ON(!skb);
726
727 pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
728 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
729
730 if (status & LastFrag) {
731 if (status & (TxError | TxFIFOUnder)) {
732 if (netif_msg_tx_err(cp))
733 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
734 cp->dev->name, status);
735 cp->net_stats.tx_errors++;
736 if (status & TxOWC)
737 cp->net_stats.tx_window_errors++;
738 if (status & TxMaxCol)
739 cp->net_stats.tx_aborted_errors++;
740 if (status & TxLinkFail)
741 cp->net_stats.tx_carrier_errors++;
742 if (status & TxFIFOUnder)
743 cp->net_stats.tx_fifo_errors++;
744 } else {
745 cp->net_stats.collisions +=
746 ((status >> TxColCntShift) & TxColCntMask);
747 cp->net_stats.tx_packets++;
748 cp->net_stats.tx_bytes += skb->len;
749 if (netif_msg_tx_done(cp))
750 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
751 }
752 dev_kfree_skb_irq(skb);
753 }
754
755 cp->tx_skb[tx_tail].skb = NULL;
756
757 tx_tail = NEXT_TX(tx_tail);
758 }
759
760 cp->tx_tail = tx_tail;
761
762 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
763 netif_wake_queue(cp->dev);
764}
765
766static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
767{
768 struct cp_private *cp = netdev_priv(dev);
769 unsigned entry;
770 u32 eor, flags;
771#if CP_VLAN_TAG_USED
772 u32 vlan_tag = 0;
773#endif
774 int mss = 0;
775
776 spin_lock_irq(&cp->lock);
777
778 /* This is a hard error, log it. */
779 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
780 netif_stop_queue(dev);
781 spin_unlock_irq(&cp->lock);
782 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
783 dev->name);
784 return 1;
785 }
786
787#if CP_VLAN_TAG_USED
788 if (cp->vlgrp && vlan_tx_tag_present(skb))
789 vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
790#endif
791
792 entry = cp->tx_head;
793 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
794 if (dev->features & NETIF_F_TSO)
795 mss = skb_shinfo(skb)->gso_size;
796
797 if (skb_shinfo(skb)->nr_frags == 0) {
798 struct cp_desc *txd = &cp->tx_ring[entry];
799 u32 len;
800 dma_addr_t mapping;
801
802 len = skb->len;
803 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
804 CP_VLAN_TX_TAG(txd, vlan_tag);
805 txd->addr = cpu_to_le64(mapping);
806 wmb();
807
808 flags = eor | len | DescOwn | FirstFrag | LastFrag;
809
810 if (mss)
811 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
812 else if (skb->ip_summed == CHECKSUM_HW) {
813 const struct iphdr *ip = skb->nh.iph;
814 if (ip->protocol == IPPROTO_TCP)
815 flags |= IPCS | TCPCS;
816 else if (ip->protocol == IPPROTO_UDP)
817 flags |= IPCS | UDPCS;
818 else
819 WARN_ON(1); /* we need a WARN() */
820 }
821
822 txd->opts1 = cpu_to_le32(flags);
823 wmb();
824
825 cp->tx_skb[entry].skb = skb;
826 cp->tx_skb[entry].len = len;
827 entry = NEXT_TX(entry);
828 } else {
829 struct cp_desc *txd;
830 u32 first_len, first_eor;
831 dma_addr_t first_mapping;
832 int frag, first_entry = entry;
833 const struct iphdr *ip = skb->nh.iph;
834
835 /* We must give this initial chunk to the device last.
836 * Otherwise we could race with the device.
837 */
838 first_eor = eor;
839 first_len = skb_headlen(skb);
840 first_mapping = pci_map_single(cp->pdev, skb->data,
841 first_len, PCI_DMA_TODEVICE);
842 cp->tx_skb[entry].skb = skb;
843 cp->tx_skb[entry].len = first_len;
844 entry = NEXT_TX(entry);
845
846 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
847 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
848 u32 len;
849 u32 ctrl;
850 dma_addr_t mapping;
851
852 len = this_frag->size;
853 mapping = pci_map_single(cp->pdev,
854 ((void *) page_address(this_frag->page) +
855 this_frag->page_offset),
856 len, PCI_DMA_TODEVICE);
857 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
858
859 ctrl = eor | len | DescOwn;
860
861 if (mss)
862 ctrl |= LargeSend |
863 ((mss & MSSMask) << MSSShift);
864 else if (skb->ip_summed == CHECKSUM_HW) {
865 if (ip->protocol == IPPROTO_TCP)
866 ctrl |= IPCS | TCPCS;
867 else if (ip->protocol == IPPROTO_UDP)
868 ctrl |= IPCS | UDPCS;
869 else
870 BUG();
871 }
872
873 if (frag == skb_shinfo(skb)->nr_frags - 1)
874 ctrl |= LastFrag;
875
876 txd = &cp->tx_ring[entry];
877 CP_VLAN_TX_TAG(txd, vlan_tag);
878 txd->addr = cpu_to_le64(mapping);
879 wmb();
880
881 txd->opts1 = cpu_to_le32(ctrl);
882 wmb();
883
884 cp->tx_skb[entry].skb = skb;
885 cp->tx_skb[entry].len = len;
886 entry = NEXT_TX(entry);
887 }
888
889 txd = &cp->tx_ring[first_entry];
890 CP_VLAN_TX_TAG(txd, vlan_tag);
891 txd->addr = cpu_to_le64(first_mapping);
892 wmb();
893
894 if (skb->ip_summed == CHECKSUM_HW) {
895 if (ip->protocol == IPPROTO_TCP)
896 txd->opts1 = cpu_to_le32(first_eor | first_len |
897 FirstFrag | DescOwn |
898 IPCS | TCPCS);
899 else if (ip->protocol == IPPROTO_UDP)
900 txd->opts1 = cpu_to_le32(first_eor | first_len |
901 FirstFrag | DescOwn |
902 IPCS | UDPCS);
903 else
904 BUG();
905 } else
906 txd->opts1 = cpu_to_le32(first_eor | first_len |
907 FirstFrag | DescOwn);
908 wmb();
909 }
910 cp->tx_head = entry;
911 if (netif_msg_tx_queued(cp))
912 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
913 dev->name, entry, skb->len);
914 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
915 netif_stop_queue(dev);
916
917 spin_unlock_irq(&cp->lock);
918
919 cpw8(TxPoll, NormalTxPoll);
920 dev->trans_start = jiffies;
921
922 return 0;
923}
924
925/* Set or clear the multicast filter for this adaptor.
926 This routine is not state sensitive and need not be SMP locked. */
927
928static void __cp_set_rx_mode (struct net_device *dev)
929{
930 struct cp_private *cp = netdev_priv(dev);
931 u32 mc_filter[2]; /* Multicast hash filter */
932 int i, rx_mode;
933 u32 tmp;
934
935 /* Note: do not reorder, GCC is clever about common statements. */
936 if (dev->flags & IFF_PROMISC) {
937 /* Unconditionally log net taps. */
938 printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
939 dev->name);
940 rx_mode =
941 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
942 AcceptAllPhys;
943 mc_filter[1] = mc_filter[0] = 0xffffffff;
944 } else if ((dev->mc_count > multicast_filter_limit)
945 || (dev->flags & IFF_ALLMULTI)) {
946 /* Too many to filter perfectly -- accept all multicasts. */
947 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
948 mc_filter[1] = mc_filter[0] = 0xffffffff;
949 } else {
950 struct dev_mc_list *mclist;
951 rx_mode = AcceptBroadcast | AcceptMyPhys;
952 mc_filter[1] = mc_filter[0] = 0;
953 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
954 i++, mclist = mclist->next) {
955 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
956
957 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
958 rx_mode |= AcceptMulticast;
959 }
960 }
961
962 /* We can safely update without stopping the chip. */
963 tmp = cp_rx_config | rx_mode;
964 if (cp->rx_config != tmp) {
965 cpw32_f (RxConfig, tmp);
966 cp->rx_config = tmp;
967 }
968 cpw32_f (MAR0 + 0, mc_filter[0]);
969 cpw32_f (MAR0 + 4, mc_filter[1]);
970}
971
972static void cp_set_rx_mode (struct net_device *dev)
973{
974 unsigned long flags;
975 struct cp_private *cp = netdev_priv(dev);
976
977 spin_lock_irqsave (&cp->lock, flags);
978 __cp_set_rx_mode(dev);
979 spin_unlock_irqrestore (&cp->lock, flags);
980}
981
982static void __cp_get_stats(struct cp_private *cp)
983{
984 /* only lower 24 bits valid; write any value to clear */
985 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
986 cpw32 (RxMissed, 0);
987}
988
989static struct net_device_stats *cp_get_stats(struct net_device *dev)
990{
991 struct cp_private *cp = netdev_priv(dev);
992 unsigned long flags;
993
994 /* The chip need only report frames it silently dropped. */
995 spin_lock_irqsave(&cp->lock, flags);
996 if (netif_running(dev) && netif_device_present(dev))
997 __cp_get_stats(cp);
998 spin_unlock_irqrestore(&cp->lock, flags);
999
1000 return &cp->net_stats;
1001}
1002
1003static void cp_stop_hw (struct cp_private *cp)
1004{
1005 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
1006 cpw16_f(IntrMask, 0);
1007 cpw8(Cmd, 0);
1008 cpw16_f(CpCmd, 0);
1009 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
1010
1011 cp->rx_tail = 0;
1012 cp->tx_head = cp->tx_tail = 0;
1013}
1014
1015static void cp_reset_hw (struct cp_private *cp)
1016{
1017 unsigned work = 1000;
1018
1019 cpw8(Cmd, CmdReset);
1020
1021 while (work--) {
1022 if (!(cpr8(Cmd) & CmdReset))
1023 return;
1024
1025 schedule_timeout_uninterruptible(10);
1026 }
1027
1028 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1029}
1030
1031static inline void cp_start_hw (struct cp_private *cp)
1032{
1033 cpw16(CpCmd, cp->cpcmd);
1034 cpw8(Cmd, RxOn | TxOn);
1035}
1036
1037static void cp_init_hw (struct cp_private *cp)
1038{
1039 struct net_device *dev = cp->dev;
1040 dma_addr_t ring_dma;
1041
1042 cp_reset_hw(cp);
1043
1044 cpw8_f (Cfg9346, Cfg9346_Unlock);
1045
1046 /* Restore our idea of the MAC address. */
1047 cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
1048 cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
1049
1050 cp_start_hw(cp);
1051 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1052
1053 __cp_set_rx_mode(dev);
1054 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1055
1056 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1057 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1058 cpw8(Config3, PARMEnable);
1059 cp->wol_enabled = 0;
1060
1061 cpw8(Config5, cpr8(Config5) & PMEStatus);
1062
1063 cpw32_f(HiTxRingAddr, 0);
1064 cpw32_f(HiTxRingAddr + 4, 0);
1065
1066 ring_dma = cp->ring_dma;
1067 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1068 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1069
1070 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1071 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1072 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
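/* The 64-bit ring addresses are programmed as two 32-bit halves.  The
 * "(ring_dma >> 16) >> 16" double shift extracts the high word without
 * triggering a shift-wider-than-type warning on builds where dma_addr_t
 * is only 32 bits. */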
1073
1074 cpw16(MultiIntr, 0);
1075
1076 cpw16_f(IntrMask, cp_intr_mask);
1077
1078 cpw8_f(Cfg9346, Cfg9346_Lock);
1079}
1080
1081static int cp_refill_rx (struct cp_private *cp)
1082{
1083 unsigned i;
1084
1085 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1086 struct sk_buff *skb;
1087 dma_addr_t mapping;
1088
1089 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
1090 if (!skb)
1091 goto err_out;
1092
1093 skb->dev = cp->dev;
1094 skb_reserve(skb, RX_OFFSET);
1095
1096 mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
1097 PCI_DMA_FROMDEVICE);
1098 cp->rx_skb[i] = skb;
1099
1100 cp->rx_ring[i].opts2 = 0;
1101 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1102 if (i == (CP_RX_RING_SIZE - 1))
1103 cp->rx_ring[i].opts1 =
1104 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1105 else
1106 cp->rx_ring[i].opts1 =
1107 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1108 }
1109
1110 return 0;
1111
1112err_out:
1113 cp_clean_rings(cp);
1114 return -ENOMEM;
1115}
1116
1117static void cp_init_rings_index (struct cp_private *cp)
1118{
1119 cp->rx_tail = 0;
1120 cp->tx_head = cp->tx_tail = 0;
1121}
1122
1123static int cp_init_rings (struct cp_private *cp)
1124{
1125 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1126 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1127
1128 cp_init_rings_index(cp);
1129
1130 return cp_refill_rx (cp);
1131}
1132
1133static int cp_alloc_rings (struct cp_private *cp)
1134{
1135 void *mem;
1136
1137 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
1138 if (!mem)
1139 return -ENOMEM;
1140
1141 cp->rx_ring = mem;
1142 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1143
1144 return cp_init_rings(cp);
1145}
1146
1147static void cp_clean_rings (struct cp_private *cp)
1148{
1149 struct cp_desc *desc;
1150 unsigned i;
1151
1152 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1153 if (cp->rx_skb[i]) {
1154 desc = cp->rx_ring + i;
1155 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1156 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1157 dev_kfree_skb(cp->rx_skb[i]);
1158 }
1159 }
1160
1161 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1162 if (cp->tx_skb[i].skb) {
1163 struct sk_buff *skb = cp->tx_skb[i].skb;
1164
1165 desc = cp->tx_ring + i;
1166 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1167 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
1168 if (le32_to_cpu(desc->opts1) & LastFrag)
1169 dev_kfree_skb(skb);
1170 cp->net_stats.tx_dropped++;
1171 }
1172 }
1173
1174 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1175 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1176
1177 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1178 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
1179}
1180
1181static void cp_free_rings (struct cp_private *cp)
1182{
1183 cp_clean_rings(cp);
1184 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1185 cp->rx_ring = NULL;
1186 cp->tx_ring = NULL;
1187}
1188
1189static int cp_open (struct net_device *dev)
1190{
1191 struct cp_private *cp = netdev_priv(dev);
1192 int rc;
1193
1194 if (netif_msg_ifup(cp))
1195 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1196
1197 rc = cp_alloc_rings(cp);
1198 if (rc)
1199 return rc;
1200
1201 cp_init_hw(cp);
1202
1203 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1204 if (rc)
1205 goto err_out_hw;
1206
1207 netif_carrier_off(dev);
1208 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
1209 netif_start_queue(dev);
1210
1211 return 0;
1212
1213err_out_hw:
1214 cp_stop_hw(cp);
1215 cp_free_rings(cp);
1216 return rc;
1217}
1218
1219static int cp_close (struct net_device *dev)
1220{
1221 struct cp_private *cp = netdev_priv(dev);
1222 unsigned long flags;
1223
1224 if (netif_msg_ifdown(cp))
1225 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1226
1227 spin_lock_irqsave(&cp->lock, flags);
1228
1229 netif_stop_queue(dev);
1230 netif_carrier_off(dev);
1231
1232 cp_stop_hw(cp);
1233
1234 spin_unlock_irqrestore(&cp->lock, flags);
1235
1236 synchronize_irq(dev->irq);
1237 free_irq(dev->irq, dev);
1238
1239 cp_free_rings(cp);
1240 return 0;
1241}
1242
1243#ifdef BROKEN
1244static int cp_change_mtu(struct net_device *dev, int new_mtu)
1245{
1246 struct cp_private *cp = netdev_priv(dev);
1247 int rc;
1248 unsigned long flags;
1249
1250 /* check for invalid MTU, according to hardware limits */
1251 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1252 return -EINVAL;
1253
1254 /* if network interface not up, no need for complexity */
1255 if (!netif_running(dev)) {
1256 dev->mtu = new_mtu;
1257 cp_set_rxbufsize(cp); /* set new rx buf size */
1258 return 0;
1259 }
1260
1261 spin_lock_irqsave(&cp->lock, flags);
1262
1263 cp_stop_hw(cp); /* stop h/w and free rings */
1264 cp_clean_rings(cp);
1265
1266 dev->mtu = new_mtu;
1267 cp_set_rxbufsize(cp); /* set new rx buf size */
1268
1269 rc = cp_init_rings(cp); /* realloc and restart h/w */
1270 cp_start_hw(cp);
1271
1272 spin_unlock_irqrestore(&cp->lock, flags);
1273
1274 return rc;
1275}
1276#endif /* BROKEN */
1277
1278static const char mii_2_8139_map[8] = {
1279 BasicModeCtrl,
1280 BasicModeStatus,
1281 0,
1282 0,
1283 NWayAdvert,
1284 NWayLPAR,
1285 NWayExpansion,
1286 0
1287};
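/* The internal PHY is not behind a real MDIO bus: MII register numbers 0-7
 * are simply remapped onto the chip's own BasicModeCtrl/BasicModeStatus and
 * NWay registers via this table, and a zero entry means "not implemented",
 * which mdio_read() below reports as 0. */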
1288
1289static int mdio_read(struct net_device *dev, int phy_id, int location)
1290{
1291 struct cp_private *cp = netdev_priv(dev);
1292
1293 return location < 8 && mii_2_8139_map[location] ?
1294 readw(cp->regs + mii_2_8139_map[location]) : 0;
1295}
1296
1297
1298static void mdio_write(struct net_device *dev, int phy_id, int location,
1299 int value)
1300{
1301 struct cp_private *cp = netdev_priv(dev);
1302
1303 if (location == 0) {
1304 cpw8(Cfg9346, Cfg9346_Unlock);
1305 cpw16(BasicModeCtrl, value);
1306 cpw8(Cfg9346, Cfg9346_Lock);
1307 } else if (location < 8 && mii_2_8139_map[location])
1308 cpw16(mii_2_8139_map[location], value);
1309}
1310
1311/* Set the ethtool Wake-on-LAN settings */
1312static int netdev_set_wol (struct cp_private *cp,
1313 const struct ethtool_wolinfo *wol)
1314{
1315 u8 options;
1316
1317 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1318 /* If WOL is being disabled, no need for complexity */
1319 if (wol->wolopts) {
1320 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1321 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1322 }
1323
1324 cpw8 (Cfg9346, Cfg9346_Unlock);
1325 cpw8 (Config3, options);
1326 cpw8 (Cfg9346, Cfg9346_Lock);
1327
1328 options = 0; /* Paranoia setting */
1329 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1330 /* If WOL is being disabled, no need for complexity */
1331 if (wol->wolopts) {
1332 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1333 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1334 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1335 }
1336
1337 cpw8 (Config5, options);
1338
1339 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1340
1341 return 0;
1342}
1343
1344/* Get the ethtool Wake-on-LAN settings */
1345static void netdev_get_wol (struct cp_private *cp,
1346 struct ethtool_wolinfo *wol)
1347{
1348 u8 options;
1349
1350 wol->wolopts = 0; /* Start from scratch */
1351 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1352 WAKE_MCAST | WAKE_UCAST;
1353 /* We don't need to go on if WOL is disabled */
1354 if (!cp->wol_enabled) return;
1355
1356 options = cpr8 (Config3);
1357 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1358 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1359
1360 options = 0; /* Paranoia setting */
1361 options = cpr8 (Config5);
1362 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1363 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1364 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1365}
1366
1367static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1368{
1369 struct cp_private *cp = netdev_priv(dev);
1370
1371 strcpy (info->driver, DRV_NAME);
1372 strcpy (info->version, DRV_VERSION);
1373 strcpy (info->bus_info, pci_name(cp->pdev));
1374}
1375
1376static int cp_get_regs_len(struct net_device *dev)
1377{
1378 return CP_REGS_SIZE;
1379}
1380
1381static int cp_get_stats_count (struct net_device *dev)
1382{
1383 return CP_NUM_STATS;
1384}
1385
1386static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1387{
1388 struct cp_private *cp = netdev_priv(dev);
1389 int rc;
1390 unsigned long flags;
1391
1392 spin_lock_irqsave(&cp->lock, flags);
1393 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1394 spin_unlock_irqrestore(&cp->lock, flags);
1395
1396 return rc;
1397}
1398
1399static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1400{
1401 struct cp_private *cp = netdev_priv(dev);
1402 int rc;
1403 unsigned long flags;
1404
1405 spin_lock_irqsave(&cp->lock, flags);
1406 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1407 spin_unlock_irqrestore(&cp->lock, flags);
1408
1409 return rc;
1410}
1411
1412static int cp_nway_reset(struct net_device *dev)
1413{
1414 struct cp_private *cp = netdev_priv(dev);
1415 return mii_nway_restart(&cp->mii_if);
1416}
1417
1418static u32 cp_get_msglevel(struct net_device *dev)
1419{
1420 struct cp_private *cp = netdev_priv(dev);
1421 return cp->msg_enable;
1422}
1423
1424static void cp_set_msglevel(struct net_device *dev, u32 value)
1425{
1426 struct cp_private *cp = netdev_priv(dev);
1427 cp->msg_enable = value;
1428}
1429
1430static u32 cp_get_rx_csum(struct net_device *dev)
1431{
1432 struct cp_private *cp = netdev_priv(dev);
1433 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1434}
1435
1436static int cp_set_rx_csum(struct net_device *dev, u32 data)
1437{
1438 struct cp_private *cp = netdev_priv(dev);
1439 u16 cmd = cp->cpcmd, newcmd;
1440
1441 newcmd = cmd;
1442
1443 if (data)
1444 newcmd |= RxChkSum;
1445 else
1446 newcmd &= ~RxChkSum;
1447
1448 if (newcmd != cmd) {
1449 unsigned long flags;
1450
1451 spin_lock_irqsave(&cp->lock, flags);
1452 cp->cpcmd = newcmd;
1453 cpw16_f(CpCmd, newcmd);
1454 spin_unlock_irqrestore(&cp->lock, flags);
1455 }
1456
1457 return 0;
1458}
1459
1460static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1461 void *p)
1462{
1463 struct cp_private *cp = netdev_priv(dev);
1464 unsigned long flags;
1465
1466 if (regs->len < CP_REGS_SIZE)
1467 return /* -EINVAL */;
1468
1469 regs->version = CP_REGS_VER;
1470
1471 spin_lock_irqsave(&cp->lock, flags);
1472 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1473 spin_unlock_irqrestore(&cp->lock, flags);
1474}
1475
1476static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1477{
1478 struct cp_private *cp = netdev_priv(dev);
1479 unsigned long flags;
1480
1481 spin_lock_irqsave (&cp->lock, flags);
1482 netdev_get_wol (cp, wol);
1483 spin_unlock_irqrestore (&cp->lock, flags);
1484}
1485
1486static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1487{
1488 struct cp_private *cp = netdev_priv(dev);
1489 unsigned long flags;
1490 int rc;
1491
1492 spin_lock_irqsave (&cp->lock, flags);
1493 rc = netdev_set_wol (cp, wol);
1494 spin_unlock_irqrestore (&cp->lock, flags);
1495
1496 return rc;
1497}
1498
1499static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1500{
1501 switch (stringset) {
1502 case ETH_SS_STATS:
1503 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1504 break;
1505 default:
1506 BUG();
1507 break;
1508 }
1509}
1510
1511static void cp_get_ethtool_stats (struct net_device *dev,
1512 struct ethtool_stats *estats, u64 *tmp_stats)
1513{
1514 struct cp_private *cp = netdev_priv(dev);
1515 struct cp_dma_stats *nic_stats;
1516 dma_addr_t dma;
1517 int i;
1518
1519 nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
1520 if (!nic_stats)
1521 return;
1522
1523 /* begin NIC statistics dump */
1524 cpw32(StatsAddr + 4, (u64)dma >> 32);
1525 cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1526 cpr32(StatsAddr);
1527
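/* Writing the stats-block address with the DumpStats bit set asks the NIC
 * to DMA its hardware counters into the buffer just allocated; the bit
 * self-clears once the dump is complete, so poll it (up to ~10 ms) before
 * reading the snapshot back. */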
1528 for (i = 0; i < 1000; i++) {
1529 if ((cpr32(StatsAddr) & DumpStats) == 0)
1530 break;
1531 udelay(10);
1532 }
1533 cpw32(StatsAddr, 0);
1534 cpw32(StatsAddr + 4, 0);
1535 cpr32(StatsAddr);
1536
1537 i = 0;
1538 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1539 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1540 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1541 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1542 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1543 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1544 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1545 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1546 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1547 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1548 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1549 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1550 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1551 tmp_stats[i++] = cp->cp_stats.rx_frags;
1552 BUG_ON(i != CP_NUM_STATS);
1553
1554 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1555}
1556
1557static struct ethtool_ops cp_ethtool_ops = {
1558 .get_drvinfo = cp_get_drvinfo,
1559 .get_regs_len = cp_get_regs_len,
1560 .get_stats_count = cp_get_stats_count,
1561 .get_settings = cp_get_settings,
1562 .set_settings = cp_set_settings,
1563 .nway_reset = cp_nway_reset,
1564 .get_link = ethtool_op_get_link,
1565 .get_msglevel = cp_get_msglevel,
1566 .set_msglevel = cp_set_msglevel,
1567 .get_rx_csum = cp_get_rx_csum,
1568 .set_rx_csum = cp_set_rx_csum,
1569 .get_tx_csum = ethtool_op_get_tx_csum,
1570 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1571 .get_sg = ethtool_op_get_sg,
1572 .set_sg = ethtool_op_set_sg,
1573 .get_tso = ethtool_op_get_tso,
1574 .set_tso = ethtool_op_set_tso,
1575 .get_regs = cp_get_regs,
1576 .get_wol = cp_get_wol,
1577 .set_wol = cp_set_wol,
1578 .get_strings = cp_get_strings,
1579 .get_ethtool_stats = cp_get_ethtool_stats,
1580 .get_perm_addr = ethtool_op_get_perm_addr,
1581 .get_eeprom_len = cp_get_eeprom_len,
1582 .get_eeprom = cp_get_eeprom,
1583 .set_eeprom = cp_set_eeprom,
1584};
1585
1586static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1587{
1588 struct cp_private *cp = netdev_priv(dev);
1589 int rc;
1590 unsigned long flags;
1591
1592 if (!netif_running(dev))
1593 return -EINVAL;
1594
1595 spin_lock_irqsave(&cp->lock, flags);
1596 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1597 spin_unlock_irqrestore(&cp->lock, flags);
1598 return rc;
1599}
1600
1601/* Serial EEPROM section. */
1602
1603/* EEPROM_Ctrl bits. */
1604#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1605#define EE_CS 0x08 /* EEPROM chip select. */
1606#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1607#define EE_WRITE_0 0x00
1608#define EE_WRITE_1 0x02
1609#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1610#define EE_ENB (0x80 | EE_CS)
1611
1612/* Delay between EEPROM clock transitions.
1613 No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1614 */
1615
1616#define eeprom_delay() readl(ee_addr)
1617
1618/* The EEPROM commands include the always-set leading bit. */
1619#define EE_EXTEND_CMD (4)
1620#define EE_WRITE_CMD (5)
1621#define EE_READ_CMD (6)
1622#define EE_ERASE_CMD (7)
1623
1624#define EE_EWDS_ADDR (0)
1625#define EE_WRAL_ADDR (1)
1626#define EE_ERAL_ADDR (2)
1627#define EE_EWEN_ADDR (3)
1628
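/* The serial EEPROM (93C46/93C56 style) is bit-banged through Cfg9346:
 * every transaction clocks out a start bit (the always-set leading bit
 * noted above), a 2-bit opcode, an addr_len-bit address and, for writes,
 * 16 data bits, MSB first.  The "extended" commands share the
 * EE_EXTEND_CMD opcode and encode their sub-operation (EWDS/WRAL/ERAL/EWEN)
 * in the top two address bits, which is what eeprom_extend_cmd() builds. */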
1629#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1630
1631static void eeprom_cmd_start(void __iomem *ee_addr)
1632{
1633 writeb (EE_ENB & ~EE_CS, ee_addr);
1634 writeb (EE_ENB, ee_addr);
1635 eeprom_delay ();
1636}
1637
1638static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1639{
1640 int i;
1641
1642 /* Shift the command bits out. */
1643 for (i = cmd_len - 1; i >= 0; i--) {
1644 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1645 writeb (EE_ENB | dataval, ee_addr);
1646 eeprom_delay ();
1647 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1648 eeprom_delay ();
1649 }
1650 writeb (EE_ENB, ee_addr);
1651 eeprom_delay ();
1652}
1653
1654static void eeprom_cmd_end(void __iomem *ee_addr)
1655{
1656 writeb (~EE_CS, ee_addr);
1657 eeprom_delay ();
1658}
1659
1660static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1661 int addr_len)
1662{
1663 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1664
1665 eeprom_cmd_start(ee_addr);
1666 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1667 eeprom_cmd_end(ee_addr);
1668}
1669
1670static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1671{
1672 int i;
1673 u16 retval = 0;
1674 void __iomem *ee_addr = ioaddr + Cfg9346;
1675 int read_cmd = location | (EE_READ_CMD << addr_len);
1676
1677 eeprom_cmd_start(ee_addr);
1678 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1679
1680 for (i = 16; i > 0; i--) {
1681 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1682 eeprom_delay ();
1683 retval =
1684 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1685 0);
1686 writeb (EE_ENB, ee_addr);
1687 eeprom_delay ();
1688 }
1689
1690 eeprom_cmd_end(ee_addr);
1691
1692 return retval;
1693}
1694
1695static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1696 int addr_len)
1697{
1698 int i;
1699 void __iomem *ee_addr = ioaddr + Cfg9346;
1700 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1701
1702 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1703
1704 eeprom_cmd_start(ee_addr);
1705 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1706 eeprom_cmd(ee_addr, val, 16);
1707 eeprom_cmd_end(ee_addr);
1708
1709 eeprom_cmd_start(ee_addr);
1710 for (i = 0; i < 20000; i++)
1711 if (readb(ee_addr) & EE_DATA_READ)
1712 break;
1713 eeprom_cmd_end(ee_addr);
1714
1715 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1716}
1717
1718static int cp_get_eeprom_len(struct net_device *dev)
1719{
1720 struct cp_private *cp = netdev_priv(dev);
1721 int size;
1722
1723 spin_lock_irq(&cp->lock);
1724 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1725 spin_unlock_irq(&cp->lock);
1726
1727 return size;
1728}
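/* Reading word 0 with 8-bit addressing and comparing against 0x8129 is the
 * driver's heuristic for telling the larger serial EEPROM part (8 address
 * bits, 256 bytes) from the common 93C46-class part (6 address bits,
 * 128 bytes); the same probe is repeated in cp_get_eeprom(),
 * cp_set_eeprom() and cp_init_one(). */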
1729
1730static int cp_get_eeprom(struct net_device *dev,
1731 struct ethtool_eeprom *eeprom, u8 *data)
1732{
1733 struct cp_private *cp = netdev_priv(dev);
1734 unsigned int addr_len;
1735 u16 val;
1736 u32 offset = eeprom->offset >> 1;
1737 u32 len = eeprom->len;
1738 u32 i = 0;
1739
1740 eeprom->magic = CP_EEPROM_MAGIC;
1741
1742 spin_lock_irq(&cp->lock);
1743
1744 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1745
1746 if (eeprom->offset & 1) {
1747 val = read_eeprom(cp->regs, offset, addr_len);
1748 data[i++] = (u8)(val >> 8);
1749 offset++;
1750 }
1751
1752 while (i < len - 1) {
1753 val = read_eeprom(cp->regs, offset, addr_len);
1754 data[i++] = (u8)val;
1755 data[i++] = (u8)(val >> 8);
1756 offset++;
1757 }
1758
1759 if (i < len) {
1760 val = read_eeprom(cp->regs, offset, addr_len);
1761 data[i] = (u8)val;
1762 }
1763
1764 spin_unlock_irq(&cp->lock);
1765 return 0;
1766}
1767
1768static int cp_set_eeprom(struct net_device *dev,
1769 struct ethtool_eeprom *eeprom, u8 *data)
1770{
1771 struct cp_private *cp = netdev_priv(dev);
1772 unsigned int addr_len;
1773 u16 val;
1774 u32 offset = eeprom->offset >> 1;
1775 u32 len = eeprom->len;
1776 u32 i = 0;
1777
1778 if (eeprom->magic != CP_EEPROM_MAGIC)
1779 return -EINVAL;
1780
1781 spin_lock_irq(&cp->lock);
1782
1783 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1784
1785 if (eeprom->offset & 1) {
1786 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1787 val |= (u16)data[i++] << 8;
1788 write_eeprom(cp->regs, offset, val, addr_len);
1789 offset++;
1790 }
1791
1792 while (i < len - 1) {
1793 val = (u16)data[i++];
1794 val |= (u16)data[i++] << 8;
1795 write_eeprom(cp->regs, offset, val, addr_len);
1796 offset++;
1797 }
1798
1799 if (i < len) {
1800 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1801 val |= (u16)data[i];
1802 write_eeprom(cp->regs, offset, val, addr_len);
1803 }
1804
1805 spin_unlock_irq(&cp->lock);
1806 return 0;
1807}
1808
1809/* Put the board into D3cold state and wait for WakeUp signal */
1810static void cp_set_d3_state (struct cp_private *cp)
1811{
1812 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1813 pci_set_power_state (cp->pdev, PCI_D3hot);
1814}
1815
1816static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1817{
1818 struct net_device *dev;
1819 struct cp_private *cp;
1820 int rc;
1821 void __iomem *regs;
1822 resource_size_t pciaddr;
1823 unsigned int addr_len, i, pci_using_dac;
1824 u8 pci_rev;
1825
1826#ifndef MODULE
1827 static int version_printed;
1828 if (version_printed++ == 0)
1829 printk("%s", version);
1830#endif
1831
1832 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1833
1834 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1835 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
1836 dev_err(&pdev->dev,
1837 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1838 pdev->vendor, pdev->device, pci_rev);
1839 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1840 return -ENODEV;
1841 }
1842
1843 dev = alloc_etherdev(sizeof(struct cp_private));
1844 if (!dev)
1845 return -ENOMEM;
1846 SET_MODULE_OWNER(dev);
1847 SET_NETDEV_DEV(dev, &pdev->dev);
1848
1849 cp = netdev_priv(dev);
1850 cp->pdev = pdev;
1851 cp->dev = dev;
1852 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1853 spin_lock_init (&cp->lock);
1854 cp->mii_if.dev = dev;
1855 cp->mii_if.mdio_read = mdio_read;
1856 cp->mii_if.mdio_write = mdio_write;
1857 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1858 cp->mii_if.phy_id_mask = 0x1f;
1859 cp->mii_if.reg_num_mask = 0x1f;
1860 cp_set_rxbufsize(cp);
1861
1862 rc = pci_enable_device(pdev);
1863 if (rc)
1864 goto err_out_free;
1865
1866 rc = pci_set_mwi(pdev);
1867 if (rc)
1868 goto err_out_disable;
1869
1870 rc = pci_request_regions(pdev, DRV_NAME);
1871 if (rc)
1872 goto err_out_mwi;
1873
1874 pciaddr = pci_resource_start(pdev, 1);
1875 if (!pciaddr) {
1876 rc = -EIO;
1877		dev_err(&pdev->dev, "no MMIO resource\n");
1878 goto err_out_res;
1879 }
1880 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1881 rc = -EIO;
1882		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1883			(unsigned long long)pci_resource_len(pdev, 1));
1884 goto err_out_res;
1885 }
1886
1887 /* Configure DMA attributes. */
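	/* Prefer 64-bit streaming and consistent DMA masks when dma_addr_t
	 * is wider than 32 bits; otherwise fall back to 32-bit masks.
	 * pci_using_dac is remembered so the PCIDAC command bit and
	 * NETIF_F_HIGHDMA can be set further below. */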
1888 if ((sizeof(dma_addr_t) > 4) &&
1889 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
1890 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1891 pci_using_dac = 1;
1892 } else {
1893 pci_using_dac = 0;
1894
1895		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1896		if (rc) {
1897			dev_err(&pdev->dev,
1898				"No usable DMA configuration, aborting.\n");
1899 goto err_out_res;
1900 }
1901		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1902		if (rc) {
1903			dev_err(&pdev->dev,
1904				"No usable consistent DMA configuration, "
1905				"aborting.\n");
1906 goto err_out_res;
1907 }
1908 }
1909
1910 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1911 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1912
1913 regs = ioremap(pciaddr, CP_REGS_SIZE);
1914 if (!regs) {
1915 rc = -EIO;
1916		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1917			(unsigned long long)pci_resource_len(pdev, 1),
1918			(unsigned long long)pciaddr);
1919 goto err_out_res;
1920 }
1921 dev->base_addr = (unsigned long) regs;
1922 cp->regs = regs;
1923
1924 cp_stop_hw(cp);
1925
1926 /* read MAC address from EEPROM */
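	/* The station address is stored as three little-endian 16-bit
	 * words starting at EEPROM word offset 7. */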
1927 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1928 for (i = 0; i < 3; i++)
1929 ((u16 *) (dev->dev_addr))[i] =
1930 le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
1931	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1932
1933 dev->open = cp_open;
1934 dev->stop = cp_close;
1935 dev->set_multicast_list = cp_set_rx_mode;
1936 dev->hard_start_xmit = cp_start_xmit;
1937 dev->get_stats = cp_get_stats;
1938 dev->do_ioctl = cp_ioctl;
1939 dev->poll = cp_rx_poll;
1940#ifdef CONFIG_NET_POLL_CONTROLLER
1941 dev->poll_controller = cp_poll_controller;
1942#endif
1943 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1944#ifdef BROKEN
1945 dev->change_mtu = cp_change_mtu;
1946#endif
1947 dev->ethtool_ops = &cp_ethtool_ops;
1948#if 0
1949 dev->tx_timeout = cp_tx_timeout;
1950 dev->watchdog_timeo = TX_TIMEOUT;
1951#endif
1952
1953#if CP_VLAN_TAG_USED
1954 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1955 dev->vlan_rx_register = cp_vlan_rx_register;
1956 dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
1957#endif
1958
1959 if (pci_using_dac)
1960 dev->features |= NETIF_F_HIGHDMA;
1961
1962#if 0 /* disabled by default until verified */
1963 dev->features |= NETIF_F_TSO;
1964#endif
1965
1966 dev->irq = pdev->irq;
1967
1968 rc = register_netdev(dev);
1969 if (rc)
1970 goto err_out_iomap;
1971
1972 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1973 "%02x:%02x:%02x:%02x:%02x:%02x, "
1974 "IRQ %d\n",
1975 dev->name,
1976 dev->base_addr,
1977 dev->dev_addr[0], dev->dev_addr[1],
1978 dev->dev_addr[2], dev->dev_addr[3],
1979 dev->dev_addr[4], dev->dev_addr[5],
1980 dev->irq);
1981
1982 pci_set_drvdata(pdev, dev);
1983
1984 /* enable busmastering and memory-write-invalidate */
1985 pci_set_master(pdev);
1986
1987 if (cp->wol_enabled)
1988 cp_set_d3_state (cp);
1989
1990 return 0;
1991
1992err_out_iomap:
1993 iounmap(regs);
1994err_out_res:
1995 pci_release_regions(pdev);
1996err_out_mwi:
1997 pci_clear_mwi(pdev);
1998err_out_disable:
1999 pci_disable_device(pdev);
2000err_out_free:
2001 free_netdev(dev);
2002 return rc;
2003}
2004
2005static void cp_remove_one (struct pci_dev *pdev)
2006{
2007 struct net_device *dev = pci_get_drvdata(pdev);
2008 struct cp_private *cp = netdev_priv(dev);
2009
2010 unregister_netdev(dev);
2011 iounmap(cp->regs);
2012 if (cp->wol_enabled)
2013 pci_set_power_state (pdev, PCI_D0);
2014 pci_release_regions(pdev);
2015 pci_clear_mwi(pdev);
2016 pci_disable_device(pdev);
2017 pci_set_drvdata(pdev, NULL);
2018 free_netdev(dev);
2019}
2020
2021#ifdef CONFIG_PM
2022	static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2023	{
2024 struct net_device *dev = pci_get_drvdata(pdev);
2025 struct cp_private *cp = netdev_priv(dev);
2026 unsigned long flags;
2027
2028 if (!netif_running(dev))
2029 return 0;
2030
2031 netif_device_detach (dev);
2032 netif_stop_queue (dev);
2033
2034 spin_lock_irqsave (&cp->lock, flags);
2035
2036 /* Disable Rx and Tx */
2037 cpw16 (IntrMask, 0);
2038	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2039
2040 spin_unlock_irqrestore (&cp->lock, flags);
2041
2042 pci_save_state(pdev);
2043 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2044 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2045
2046 return 0;
2047}
2048
2049static int cp_resume (struct pci_dev *pdev)
2050{
2051 struct net_device *dev = pci_get_drvdata (pdev);
2052 struct cp_private *cp = netdev_priv(dev);
2053	unsigned long flags;
2054
2055 if (!netif_running(dev))
2056 return 0;
2057
2058 netif_device_attach (dev);
2059
2060 pci_set_power_state(pdev, PCI_D0);
2061 pci_restore_state(pdev);
2062 pci_enable_wake(pdev, PCI_D0, 0);
2063
2064 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2065 cp_init_rings_index (cp);
2066 cp_init_hw (cp);
2067 netif_start_queue (dev);
2068
2069 spin_lock_irqsave (&cp->lock, flags);
2070
2071 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
2072
2073 spin_unlock_irqrestore (&cp->lock, flags);
2074
2075 return 0;
2076}
2077#endif /* CONFIG_PM */
2078
2079static struct pci_driver cp_driver = {
2080 .name = DRV_NAME,
2081 .id_table = cp_pci_tbl,
2082 .probe = cp_init_one,
2083 .remove = cp_remove_one,
2084#ifdef CONFIG_PM
2085 .resume = cp_resume,
2086 .suspend = cp_suspend,
2087#endif
2088};
2089
2090static int __init cp_init (void)
2091{
2092#ifdef MODULE
2093 printk("%s", version);
2094#endif
2095 return pci_module_init (&cp_driver);
2096}
2097
2098static void __exit cp_exit (void)
2099{
2100 pci_unregister_driver (&cp_driver);
2101}
2102
2103module_init(cp_init);
2104module_exit(cp_exit);