PCI: read revision ID by default
[linux-block.git] / drivers / net / tulip / dmfe.c
CommitLineData
1da177e4
LT
1/*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 DAVICOM Web-Site: www.davicom.com.tw
17
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
25
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
31
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
36
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
40
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
46
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
51
52 Alan Cox <alan@redhat.com>
f3b197ac 53 Added new PCI identifiers provided by Clear Zhang at ALi
1da177e4
LT
54 for their 1563 ethernet device.
55
56 TODO
57
1da177e4
LT
58 Check on 64 bit boxes.
59 Check and fix on big endian boxes.
60
61 Test and make sure PCI latency is now correct for all cases.
62*/
63
64#define DRV_NAME "dmfe"
65#define DRV_VERSION "1.36.4"
66#define DRV_RELDATE "2002-01-17"
67
68#include <linux/module.h>
69#include <linux/kernel.h>
70#include <linux/string.h>
71#include <linux/timer.h>
72#include <linux/ptrace.h>
73#include <linux/errno.h>
74#include <linux/ioport.h>
75#include <linux/slab.h>
76#include <linux/interrupt.h>
77#include <linux/pci.h>
cb199d42 78#include <linux/dma-mapping.h>
1da177e4
LT
79#include <linux/init.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/ethtool.h>
83#include <linux/skbuff.h>
84#include <linux/delay.h>
85#include <linux/spinlock.h>
86#include <linux/crc32.h>
87#include <linux/bitops.h>
88
89#include <asm/processor.h>
90#include <asm/io.h>
91#include <asm/dma.h>
92#include <asm/uaccess.h>
93#include <asm/irq.h>
94
95
96/* Board/System/Debug information/definition ---------------- */
97#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
98#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
99#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
100#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
101
102#define DM9102_IO_SIZE 0x80
103#define DM9102A_IO_SIZE 0x100
104#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
105#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
106#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
107#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
108#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
109#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
110#define TX_BUF_ALLOC 0x600
111#define RX_ALLOC_SIZE 0x620
112#define DM910X_RESET 1
113#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
114#define CR6_DEFAULT 0x00080000 /* HD */
115#define CR7_DEFAULT 0x180c1
116#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
117#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
118#define MAX_PACKET_SIZE 1514
119#define DMFE_MAX_MULTICAST 14
120#define RX_COPY_SIZE 100
121#define MAX_CHECK_PACKET 0x8000
122#define DM9801_NOISE_FLOOR 8
123#define DM9802_NOISE_FLOOR 5
124
f1069046
ML
125#define DMFE_WOL_LINKCHANGE 0x20000000
126#define DMFE_WOL_SAMPLEPACKET 0x10000000
127#define DMFE_WOL_MAGICPACKET 0x08000000
128
129
1da177e4
LT
130#define DMFE_10MHF 0
131#define DMFE_100MHF 1
132#define DMFE_10MFD 4
133#define DMFE_100MFD 5
134#define DMFE_AUTO 8
135#define DMFE_1M_HPNA 0x10
136
137#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
138#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
139#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
140#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
141#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
142#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
143
144#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
145#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
146#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
147
f67ba792
ML
148#define DMFE_DBUG(dbug_now, msg, value) \
149 do { \
150 if (dmfe_debug || (dbug_now)) \
151 printk(KERN_ERR DRV_NAME ": %s %lx\n",\
152 (msg), (long) (value)); \
153 } while (0)
1da177e4 154
f67ba792
ML
155#define SHOW_MEDIA_TYPE(mode) \
156 printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
157 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
1da177e4
LT
158
159
160/* CR9 definition: SROM/MII */
161#define CR9_SROM_READ 0x4800
162#define CR9_SRCS 0x1
163#define CR9_SRCLK 0x2
164#define CR9_CRDOUT 0x8
165#define SROM_DATA_0 0x0
166#define SROM_DATA_1 0x4
167#define PHY_DATA_1 0x20000
168#define PHY_DATA_0 0x00000
169#define MDCLKH 0x10000
170
171#define PHY_POWER_DOWN 0x800
172
173#define SROM_V41_CODE 0x14
174
f67ba792
ML
175#define SROM_CLK_WRITE(data, ioaddr) \
176 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
177 udelay(5); \
178 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
179 udelay(5); \
180 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
181 udelay(5);
182
/* I/O window size selection.  DM9132, and DM9102A parts whose revision
 * dword is >= 0x02000030, decode a 0x100-byte window; older DM9102
 * decodes 0x80 bytes.
 * NOTE(review): dev_rev is the full PCI config dword read at offset
 * 0x08 (class code in the upper 24 bits, revision ID in the low byte),
 * not a bare revision -- the 0x020000xx comparison relies on that. */
#define __CHK_IO_SIZE(pci_id, dev_rev) \
	(( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
	DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev, dev_rev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
1da177e4
LT
189
190/* Sten Check */
191#define DEVICE net_device
192
193/* Structure/enum declaration ------------------------------- */
/* One Tx descriptor.  The four __le32 words are the layout the DM910x
 * DMA engine reads (little-endian on the wire); the trailing fields are
 * driver-private bookkeeping.  32-byte aligned for the chip's
 * descriptor fetch. */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;		/* Data for us */
	struct tx_desc *next_tx_desc;	/* driver-side ring linkage */
} __attribute__(( aligned(32) ));
199
/* One Rx descriptor.  Same split as tx_desc: four hardware words
 * followed by driver-private pointers.  32-byte aligned. */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;	/* driver-side ring linkage */
} __attribute__(( aligned(32) ));
205
/* Per-adapter state, stored as netdev_priv() of the net_device. */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u32 chip_revision;		/* Chip revision (full config dword at 0x08) */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* protects ring state and CR registers */

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow copies of chip CR registers */
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* where the next xmit is queued */
	struct tx_desc *tx_remove_ptr;	/* where completions are reaped */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;	/* where fresh skbs are attached */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;			/* MII PHY address (1 for all parts) */
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;	/* periodic link/housekeeping timer */

	/* System defined statistic counter */
	struct net_device_stats stats;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data (64 16-bit words read at probe time) */
	unsigned char srom[128];
};
277
278enum dmfe_offsets {
279 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
280 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
281 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
282 DCR15 = 0x78
283};
284
285enum dmfe_CR6_bits {
286 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
287 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
288 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
289};
290
291/* Global variable declaration ----------------------------- */
292static int __devinitdata printed_version;
293static char version[] __devinitdata =
294 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
295 DRV_VERSION " (" DRV_RELDATE ")\n";
296
297static int dmfe_debug;
298static unsigned char dmfe_media_mode = DMFE_AUTO;
299static u32 dmfe_cr6_user_set;
300
301/* For module input parameter */
302static int debug;
303static u32 cr6set;
304static unsigned char mode = 8;
305static u8 chkmode = 1;
306static u8 HPNA_mode; /* Default: Low Power/High Speed */
307static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
308static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
309static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
310static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
311 4: TX pause packet */
312
313
314/* function declaration ------------------------------------- */
315static int dmfe_open(struct DEVICE *);
316static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
317static int dmfe_stop(struct DEVICE *);
318static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
319static void dmfe_set_filter_mode(struct DEVICE *);
7282d491 320static const struct ethtool_ops netdev_ethtool_ops;
1da177e4 321static u16 read_srom_word(long ,int);
7d12e780 322static irqreturn_t dmfe_interrupt(int , void *);
1da177e4
LT
323#ifdef CONFIG_NET_POLL_CONTROLLER
324static void poll_dmfe (struct net_device *dev);
325#endif
326static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
327static void allocate_rx_buffer(struct dmfe_board_info *);
328static void update_cr6(u32, unsigned long);
329static void send_filter_frame(struct DEVICE * ,int);
330static void dm9132_id_table(struct DEVICE * ,int);
331static u16 phy_read(unsigned long, u8, u8, u32);
332static void phy_write(unsigned long, u8, u8, u16, u32);
333static void phy_write_1bit(unsigned long, u32);
334static u16 phy_read_1bit(unsigned long);
335static u8 dmfe_sense_speed(struct dmfe_board_info *);
336static void dmfe_process_mode(struct dmfe_board_info *);
337static void dmfe_timer(unsigned long);
338static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
339static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
340static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
341static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
342static void dmfe_dynamic_reset(struct DEVICE *);
343static void dmfe_free_rxbuffer(struct dmfe_board_info *);
344static void dmfe_init_dm910x(struct DEVICE *);
345static void dmfe_parse_srom(struct dmfe_board_info *);
346static void dmfe_program_DM9801(struct dmfe_board_info *, int);
347static void dmfe_program_DM9802(struct dmfe_board_info *);
348static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
349static void dmfe_set_phyxcer(struct dmfe_board_info *);
350
f67ba792 351/* DM910X network board routine ---------------------------- */
1da177e4
LT
352
353/*
354 * Search DM910X board ,allocate space and register it
355 */
356
357static int __devinit dmfe_init_one (struct pci_dev *pdev,
358 const struct pci_device_id *ent)
359{
360 struct dmfe_board_info *db; /* board information structure */
361 struct net_device *dev;
362 u32 dev_rev, pci_pmr;
363 int i, err;
364
365 DMFE_DBUG(0, "dmfe_init_one()", 0);
366
367 if (!printed_version++)
368 printk(version);
369
370 /* Init network device */
371 dev = alloc_etherdev(sizeof(*db));
372 if (dev == NULL)
373 return -ENOMEM;
374 SET_MODULE_OWNER(dev);
375 SET_NETDEV_DEV(dev, &pdev->dev);
376
cb199d42 377 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
f67ba792
ML
378 printk(KERN_WARNING DRV_NAME
379 ": 32-bit PCI DMA not available.\n");
1da177e4
LT
380 err = -ENODEV;
381 goto err_out_free;
382 }
383
384 /* Enable Master/IO access, Disable memory access */
385 err = pci_enable_device(pdev);
386 if (err)
387 goto err_out_free;
388
389 if (!pci_resource_start(pdev, 0)) {
390 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
391 err = -ENODEV;
392 goto err_out_disable;
393 }
394
395 /* Read Chip revision */
396 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
397
398 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
399 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
400 err = -ENODEV;
401 goto err_out_disable;
402 }
403
404#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
405
406 /* Set Latency Timer 80h */
407 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
408 Need a PCI quirk.. */
409
410 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
411#endif
412
413 if (pci_request_regions(pdev, DRV_NAME)) {
414 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
415 err = -ENODEV;
416 goto err_out_disable;
417 }
418
419 /* Init system & device */
420 db = netdev_priv(dev);
421
422 /* Allocate Tx/Rx descriptor memory */
f67ba792
ML
423 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
424 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
425
426 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
427 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
1da177e4
LT
428
429 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
430 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
431 db->buf_pool_start = db->buf_pool_ptr;
432 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
433
434 db->chip_id = ent->driver_data;
435 db->ioaddr = pci_resource_start(pdev, 0);
436 db->chip_revision = dev_rev;
f1069046 437 db->wol_mode = 0;
1da177e4
LT
438
439 db->pdev = pdev;
440
441 dev->base_addr = db->ioaddr;
442 dev->irq = pdev->irq;
443 pci_set_drvdata(pdev, dev);
444 dev->open = &dmfe_open;
445 dev->hard_start_xmit = &dmfe_start_xmit;
446 dev->stop = &dmfe_stop;
447 dev->get_stats = &dmfe_get_stats;
448 dev->set_multicast_list = &dmfe_set_filter_mode;
449#ifdef CONFIG_NET_POLL_CONTROLLER
450 dev->poll_controller = &poll_dmfe;
451#endif
452 dev->ethtool_ops = &netdev_ethtool_ops;
cfa51b9d 453 netif_carrier_off(dev);
1da177e4
LT
454 spin_lock_init(&db->lock);
455
456 pci_read_config_dword(pdev, 0x50, &pci_pmr);
457 pci_pmr &= 0x70000;
458 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
459 db->chip_type = 1; /* DM9102A E3 */
460 else
461 db->chip_type = 0;
462
463 /* read 64 word srom data */
464 for (i = 0; i < 64; i++)
a31e40f6 465 ((__le16 *) db->srom)[i] =
f67ba792 466 cpu_to_le16(read_srom_word(db->ioaddr, i));
1da177e4
LT
467
468 /* Set Node address */
469 for (i = 0; i < 6; i++)
470 dev->dev_addr[i] = db->srom[20 + i];
471
472 err = register_netdev (dev);
473 if (err)
474 goto err_out_res;
475
476 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
477 dev->name,
478 ent->driver_data >> 16,
479 pci_name(pdev));
480 for (i = 0; i < 6; i++)
481 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
482 printk(", irq %d.\n", dev->irq);
483
484 pci_set_master(pdev);
485
486 return 0;
487
488err_out_res:
489 pci_release_regions(pdev);
490err_out_disable:
491 pci_disable_device(pdev);
492err_out_free:
493 pci_set_drvdata(pdev, NULL);
494 free_netdev(dev);
495
496 return err;
497}
498
499
500static void __devexit dmfe_remove_one (struct pci_dev *pdev)
501{
502 struct net_device *dev = pci_get_drvdata(pdev);
503 struct dmfe_board_info *db = netdev_priv(dev);
504
505 DMFE_DBUG(0, "dmfe_remove_one()", 0);
506
507 if (dev) {
4dc68f3d
ML
508
509 unregister_netdev(dev);
510
1da177e4
LT
511 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
512 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
513 db->desc_pool_dma_ptr);
514 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
515 db->buf_pool_ptr, db->buf_pool_dma_ptr);
1da177e4
LT
516 pci_release_regions(pdev);
517 free_netdev(dev); /* free board information */
4dc68f3d 518
1da177e4
LT
519 pci_set_drvdata(pdev, NULL);
520 }
521
522 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
523}
524
525
/*
 * Open the interface.
 * Called when the interface is brought up (e.g. "ifconfig up"):
 * grabs the (shared) IRQ, resets the software state, programs the
 * chip via dmfe_init_dm910x() and starts the housekeeping timer.
 */

static int dmfe_open(struct DEVICE *dev)
{
	int ret;
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_open", 0);

	/* IRQ line may be shared with other devices */
	ret = request_irq(dev->irq, &dmfe_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and newer DM9102A
	   revisions run in normal mode; older silicon enters the
	   store-and-forward "check" mode */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x02000030) ) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Activate the network interface */
	netif_wake_queue(dev);

	/* set and activate the periodic timer (first fires after ~3 s) */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
581
582
/* Initialize the DM910X board:
 *  - reset the MAC controller,
 *  - parse the SROM and pick the media mode,
 *  - reset the PHY via the GPR port,
 *  - set up the Tx/Rx descriptor rings (CR3/CR4),
 *  - send the setup (address-filter) frame,
 *  - enable interrupts and the Tx/Rx machines.
 * The register write/delay ordering below follows the datasheet
 * bring-up sequence; do not reorder.
 */

static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM and set the media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Program the PHY unless the user forced 1M HomePNA mode */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Non-auto media: take the user's mode directly */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode;	/* Force Mode */

	/* Initialize Transmit/Receive descriptors and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to load the MAC/multicast filter */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
649
650
/*
 * Hardware start transmission.
 * Copies the skb into a pre-allocated Tx bounce buffer, hands the
 * descriptor to the chip (or queues it if TX_MAX_SEND_CNT packets are
 * already in flight) and frees the skb.  Returns 0 on consume, 1 when
 * out of Tx resources (should not happen: the queue is stopped first).
 */

static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Stop the queue; re-enabled below if descriptors remain */
	netif_stop_queue(dev);

	/* Too large packet check: drop silently (counted by caller's
	   kfree), the bounce buffers are only TX_BUF_ALLOC bytes */
	if (skb->len > MAX_PACKET_SIZE) {
		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return 0;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happens normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
		       db->tx_queue_cnt);
		return 1;
	}

	/* Disable NIC interrupt while manipulating the ring */
	outl(0, dev->base_addr + DCR7);

	/* transmit this packet: copy into the descriptor's bounce buf */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Hand to hardware now, or queue for dmfe_free_tx_pkt() */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
	}

	/* Tx resource check: restart the queue if space remains */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt (after dropping the lock) */
	spin_unlock_irqrestore(&db->lock, flags);
	outl(db->cr7_data, dev->base_addr + DCR7);

	/* free this SKB -- its data has been copied out already */
	dev_kfree_skb(skb);

	return 0;
}
719
720
/*
 * Stop the interface.
 * Called when the interface is brought down: stops the queue and
 * timer, resets the chip, powers down the PHY, releases the IRQ and
 * frees the receive buffers.
 */

static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete the housekeeping timer (waits for a running handler) */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	/* write 0x8000 to PHY reg 0 -- BMCR bit 15, PHY soft reset.
	   NOTE(review): PHY_POWER_DOWN would be 0x800; confirm intent. */
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counter */
	printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
	       " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}
762
763
/*
 * DM9102 interrupt handler.
 * Reads and acknowledges CR5, then dispatches: receive completed
 * packets to the stack, refill the Rx ring, reap Tx completions, and
 * flag a fatal system-bus error for the timer to reset.  Interrupts
 * are masked in CR7 for the duration to avoid edge re-triggering.
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status; writing it back acks the bits */
	db->cr5_data = inl(ioaddr + DCR5);
	outl(db->cr5_data, ioaddr + DCR5);
	if ( !(db->cr5_data & 0xc1) ) {
		/* none of the Rx/Tx/abnormal bits we care about */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	outl(0, ioaddr + DCR7);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error: defer a full reset to the timer */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(db);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave check mode once triggered */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, db->ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	outl(db->cr7_data, ioaddr + DCR7);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}
826
827
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(dev->irq);
	dmfe_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
844
/*
 * Reap completed Tx descriptors: walk the ring from tx_remove_ptr,
 * account statistics for each finished packet, then kick the next
 * queued packet (if any) and wake the queue when space frees up.
 * Called from the interrupt handler with db->lock held.
 */

static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)	/* still owned by the NIC */
			break;

		/* A packet sent completed */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counter (0x7fffffff = no status) */
		if ( tdes0 != 0x7fffffff ) {
			db->stats.collisions += (tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* fall back to store & forward */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the next queued Tx packet, if the in-flight limit allows */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Activate upper layer, send again */
}
913
914
915/*
916 * Calculate the CRC valude of the Rx packet
917 * flag = 1 : return the reverse CRC (for the received packet CRC)
918 * 0 : return the normal CRC (for Hash Table index)
919 */
920
921static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
922{
923 u32 crc = crc32(~0, Data, Len);
924 if (flag) crc = ~crc;
925 return crc;
926}
927
928
/*
 * Receive path: walk the Rx ring from rx_ready_ptr, passing complete
 * good frames to the stack and recycling the skb of anything bad,
 * fragmented, or copied.  Short frames (< RX_COPY_SIZE) are copied
 * into a fresh small skb so the full-size Rx buffer can be reused.
 * Called from the interrupt handler with db->lock held.
 */

static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag: fragment,
			   recycle the skb back into the ring */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;	/* strip FCS */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				db->stats.rx_errors++;
				if (rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					db->stats.rx_length_errors++;
			}

			/* deliver good frames, and (in promiscuous mode)
			   even errored frames longer than 6 bytes */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* optional software CRC re-check (check mode) */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
					/* CRC mismatch: recycle the skb */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets are copied to a new,
					   right-sized SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = dev_alloc_skb(rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
1021
1022
1023/*
1024 * Get statistics from driver.
1025 */
1026
1027static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1028{
1029 struct dmfe_board_info *db = netdev_priv(dev);
1030
1031 DMFE_DBUG(0, "dmfe_get_stats", 0);
1032 return &db->stats;
1033}
1034
1035
1036/*
1037 * Set DM910X multicast address
1038 */
1039
1040static void dmfe_set_filter_mode(struct DEVICE * dev)
1041{
1042 struct dmfe_board_info *db = netdev_priv(dev);
1043 unsigned long flags;
1044
1045 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1046 spin_lock_irqsave(&db->lock, flags);
1047
1048 if (dev->flags & IFF_PROMISC) {
1049 DMFE_DBUG(0, "Enable PROM Mode", 0);
1050 db->cr6_data |= CR6_PM | CR6_PBF;
1051 update_cr6(db->cr6_data, db->ioaddr);
1052 spin_unlock_irqrestore(&db->lock, flags);
1053 return;
1054 }
1055
1056 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1057 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1058 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1059 db->cr6_data |= CR6_PAM;
1060 spin_unlock_irqrestore(&db->lock, flags);
1061 return;
1062 }
1063
1064 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1065 if (db->chip_id == PCI_DM9132_ID)
1066 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1067 else
1068 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1069 spin_unlock_irqrestore(&db->lock, flags);
1070}
1071
f1069046
ML
1072/*
1073 * Ethtool interace
1074 */
1075
1076static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1da177e4
LT
1077 struct ethtool_drvinfo *info)
1078{
1079 struct dmfe_board_info *np = netdev_priv(dev);
1080
1081 strcpy(info->driver, DRV_NAME);
1082 strcpy(info->version, DRV_VERSION);
1083 if (np->pdev)
1084 strcpy(info->bus_info, pci_name(np->pdev));
1085 else
1086 sprintf(info->bus_info, "EISA 0x%lx %d",
1087 dev->base_addr, dev->irq);
1088}
1089
f1069046
ML
1090static int dmfe_ethtool_set_wol(struct net_device *dev,
1091 struct ethtool_wolinfo *wolinfo)
1092{
1093 struct dmfe_board_info *db = netdev_priv(dev);
1094
1095 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1096 WAKE_ARP | WAKE_MAGICSECURE))
1097 return -EOPNOTSUPP;
1098
1099 db->wol_mode = wolinfo->wolopts;
1100 return 0;
1101}
1102
1103static void dmfe_ethtool_get_wol(struct net_device *dev,
1104 struct ethtool_wolinfo *wolinfo)
1105{
1106 struct dmfe_board_info *db = netdev_priv(dev);
1107
1108 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1109 wolinfo->wolopts = db->wol_mode;
1110 return;
1111}
1112
1113
7282d491 1114static const struct ethtool_ops netdev_ethtool_ops = {
f1069046 1115 .get_drvinfo = dmfe_ethtool_get_drvinfo,
cfa51b9d 1116 .get_link = ethtool_op_get_link,
f1069046
ML
1117 .set_wol = dmfe_ethtool_set_wol,
1118 .get_wol = dmfe_ethtool_get_wol,
1da177e4
LT
1119};
1120
1121/*
1122 * A periodic timer routine
1123 * Dynamic media sense, allocate Rx buffer...
1124 */
1125
1126static void dmfe_timer(unsigned long data)
1127{
1128 u32 tmp_cr8;
1129 unsigned char tmp_cr12;
1130 struct DEVICE *dev = (struct DEVICE *) data;
1131 struct dmfe_board_info *db = netdev_priv(dev);
1132 unsigned long flags;
1133
cfa51b9d
ML
1134 int link_ok, link_ok_phy;
1135
1da177e4
LT
1136 DMFE_DBUG(0, "dmfe_timer()", 0);
1137 spin_lock_irqsave(&db->lock, flags);
1138
1139 /* Media mode process when Link OK before enter this route */
1140 if (db->first_in_callback == 0) {
1141 db->first_in_callback = 1;
1142 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1143 db->cr6_data &= ~0x40000;
1144 update_cr6(db->cr6_data, db->ioaddr);
f67ba792
ML
1145 phy_write(db->ioaddr,
1146 db->phy_addr, 0, 0x1000, db->chip_id);
1da177e4
LT
1147 db->cr6_data |= 0x40000;
1148 update_cr6(db->cr6_data, db->ioaddr);
1149 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1150 add_timer(&db->timer);
1151 spin_unlock_irqrestore(&db->lock, flags);
1152 return;
1153 }
1154 }
1155
1156
1157 /* Operating Mode Check */
1158 if ( (db->dm910x_chk_mode & 0x1) &&
1159 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1160 db->dm910x_chk_mode = 0x4;
1161
1162 /* Dynamic reset DM910X : system error or transmit time-out */
1163 tmp_cr8 = inl(db->ioaddr + DCR8);
1164 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1165 db->reset_cr8++;
1166 db->wait_reset = 1;
1167 }
1168 db->interval_rx_cnt = 0;
1169
1170 /* TX polling kick monitor */
1171 if ( db->tx_packet_cnt &&
1172 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1173 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1174
1175 /* TX Timeout */
1176 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1177 db->reset_TXtimeout++;
1178 db->wait_reset = 1;
1179 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1180 dev->name);
1181 }
1182 }
1183
1184 if (db->wait_reset) {
1185 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1186 db->reset_count++;
1187 dmfe_dynamic_reset(dev);
1188 db->first_in_callback = 0;
1189 db->timer.expires = DMFE_TIMER_WUT;
1190 add_timer(&db->timer);
1191 spin_unlock_irqrestore(&db->lock, flags);
1192 return;
1193 }
1194
1195 /* Link status check, Dynamic media type change */
1196 if (db->chip_id == PCI_DM9132_ID)
1197 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1198 else
1199 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1200
1201 if ( ((db->chip_id == PCI_DM9102_ID) &&
1202 (db->chip_revision == 0x02000030)) ||
1203 ((db->chip_id == PCI_DM9132_ID) &&
1204 (db->chip_revision == 0x02000010)) ) {
1205 /* DM9102A Chip */
1206 if (tmp_cr12 & 2)
cfa51b9d 1207 link_ok = 0;
1da177e4 1208 else
cfa51b9d 1209 link_ok = 1;
1da177e4 1210 }
cfa51b9d
ML
1211 else
1212 /*0x43 is used instead of 0x3 because bit 6 should represent
1213 link status of external PHY */
1214 link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1215
1216
1217 /* If chip reports that link is failed it could be because external
1218 PHY link status pin is not conected correctly to chip
1219 To be sure ask PHY too.
1220 */
1221
1222 /* need a dummy read because of PHY's register latch*/
1223 phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1224 link_ok_phy = (phy_read (db->ioaddr,
1225 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1da177e4 1226
cfa51b9d
ML
1227 if (link_ok_phy != link_ok) {
1228 DMFE_DBUG (0, "PHY and chip report different link status", 0);
1229 link_ok = link_ok | link_ok_phy;
1230 }
1231
1232 if ( !link_ok && netif_carrier_ok(dev)) {
1da177e4
LT
1233 /* Link Failed */
1234 DMFE_DBUG(0, "Link Failed", tmp_cr12);
cfa51b9d 1235 netif_carrier_off(dev);
1da177e4
LT
1236
1237 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1238 /* AUTO or force 1M Homerun/Longrun don't need */
1239 if ( !(db->media_mode & 0x38) )
f67ba792
ML
1240 phy_write(db->ioaddr, db->phy_addr,
1241 0, 0x1000, db->chip_id);
1da177e4
LT
1242
1243 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1244 if (db->media_mode & DMFE_AUTO) {
1245 /* 10/100M link failed, used 1M Home-Net */
1246 db->cr6_data|=0x00040000; /* bit18=1, MII */
1247 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1248 update_cr6(db->cr6_data, db->ioaddr);
1249 }
cfa51b9d
ML
1250 } else if (!netif_carrier_ok(dev)) {
1251
1252 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1253
1254 /* Auto Sense Speed */
1255 if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1256 netif_carrier_on(dev);
1257 SHOW_MEDIA_TYPE(db->op_mode);
1da177e4
LT
1258 }
1259
cfa51b9d
ML
1260 dmfe_process_mode(db);
1261 }
1262
1da177e4
LT
1263 /* HPNA remote command check */
1264 if (db->HPNA_command & 0xf00) {
1265 db->HPNA_timer--;
1266 if (!db->HPNA_timer)
1267 dmfe_HPNA_remote_cmd_chk(db);
1268 }
1269
1270 /* Timer active again */
1271 db->timer.expires = DMFE_TIMER_WUT;
1272 add_timer(&db->timer);
1273 spin_unlock_irqrestore(&db->lock, flags);
1274}
1275
1276
1277/*
1278 * Dynamic reset the DM910X board
1279 * Stop DM910X board
1280 * Free Tx/Rx allocated memory
1281 * Reset DM910X board
1282 * Re-initilize DM910X board
1283 */
1284
1285static void dmfe_dynamic_reset(struct DEVICE *dev)
1286{
1287 struct dmfe_board_info *db = netdev_priv(dev);
1288
1289 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1290
1291 /* Sopt MAC controller */
1292 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1293 update_cr6(db->cr6_data, dev->base_addr);
1294 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1295 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1296
1297 /* Disable upper layer interface */
1298 netif_stop_queue(dev);
1299
1300 /* Free Rx Allocate buffer */
1301 dmfe_free_rxbuffer(db);
1302
1303 /* system variable init */
1304 db->tx_packet_cnt = 0;
1305 db->tx_queue_cnt = 0;
1306 db->rx_avail_cnt = 0;
cfa51b9d 1307 netif_carrier_off(dev);
1da177e4
LT
1308 db->wait_reset = 0;
1309
1310 /* Re-initilize DM910X board */
1311 dmfe_init_dm910x(dev);
1312
1313 /* Restart upper layer interface */
1314 netif_wake_queue(dev);
1315}
1316
1317
1318/*
1319 * free all allocated rx buffer
1320 */
1321
1322static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1323{
1324 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1325
1326 /* free allocated rx buffer */
1327 while (db->rx_avail_cnt) {
1328 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1329 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1330 db->rx_avail_cnt--;
1331 }
1332}
1333
1334
1335/*
1336 * Reuse the SK buffer
1337 */
1338
1339static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1340{
1341 struct rx_desc *rxptr = db->rx_insert_ptr;
1342
1343 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1344 rxptr->rx_skb_ptr = skb;
f67ba792
ML
1345 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1346 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1da177e4
LT
1347 wmb();
1348 rxptr->rdes0 = cpu_to_le32(0x80000000);
1349 db->rx_avail_cnt++;
1350 db->rx_insert_ptr = rxptr->next_rx_desc;
1351 } else
1352 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1353}
1354
1355
1356/*
1357 * Initialize transmit/Receive descriptor
1358 * Using Chain structure, and allocate Tx/Rx buffer
1359 */
1360
1361static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1362{
1363 struct tx_desc *tmp_tx;
1364 struct rx_desc *tmp_rx;
1365 unsigned char *tmp_buf;
1366 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1367 dma_addr_t tmp_buf_dma;
1368 int i;
1369
1370 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1371
1372 /* tx descriptor start pointer */
1373 db->tx_insert_ptr = db->first_tx_desc;
1374 db->tx_remove_ptr = db->first_tx_desc;
1375 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1376
1377 /* rx descriptor start pointer */
f67ba792
ML
1378 db->first_rx_desc = (void *)db->first_tx_desc +
1379 sizeof(struct tx_desc) * TX_DESC_CNT;
1380
1381 db->first_rx_desc_dma = db->first_tx_desc_dma +
1382 sizeof(struct tx_desc) * TX_DESC_CNT;
1da177e4
LT
1383 db->rx_insert_ptr = db->first_rx_desc;
1384 db->rx_ready_ptr = db->first_rx_desc;
1385 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1386
1387 /* Init Transmit chain */
1388 tmp_buf = db->buf_pool_start;
1389 tmp_buf_dma = db->buf_pool_dma_start;
1390 tmp_tx_dma = db->first_tx_desc_dma;
1391 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1392 tmp_tx->tx_buf_ptr = tmp_buf;
1393 tmp_tx->tdes0 = cpu_to_le32(0);
1394 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1395 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1396 tmp_tx_dma += sizeof(struct tx_desc);
1397 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1398 tmp_tx->next_tx_desc = tmp_tx + 1;
1399 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1400 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1401 }
1402 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1403 tmp_tx->next_tx_desc = db->first_tx_desc;
1404
1405 /* Init Receive descriptor chain */
1406 tmp_rx_dma=db->first_rx_desc_dma;
1407 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1408 tmp_rx->rdes0 = cpu_to_le32(0);
1409 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1410 tmp_rx_dma += sizeof(struct rx_desc);
1411 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1412 tmp_rx->next_rx_desc = tmp_rx + 1;
1413 }
1414 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1415 tmp_rx->next_rx_desc = db->first_rx_desc;
1416
1417 /* pre-allocate Rx buffer */
1418 allocate_rx_buffer(db);
1419}
1420
1421
1422/*
1423 * Update CR6 value
1424 * Firstly stop DM910X , then written value and start
1425 */
1426
1427static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1428{
1429 u32 cr6_tmp;
1430
1431 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1432 outl(cr6_tmp, ioaddr + DCR6);
1433 udelay(5);
1434 outl(cr6_data, ioaddr + DCR6);
1435 udelay(5);
1436}
1437
1438
1439/*
1440 * Send a setup frame for DM9132
1441 * This setup frame initilize DM910X address filter mode
1442*/
1443
1444static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1445{
1446 struct dev_mc_list *mcptr;
1447 u16 * addrptr;
1448 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1449 u32 hash_val;
1450 u16 i, hash_table[4];
1451
1452 DMFE_DBUG(0, "dm9132_id_table()", 0);
1453
1454 /* Node address */
1455 addrptr = (u16 *) dev->dev_addr;
1456 outw(addrptr[0], ioaddr);
1457 ioaddr += 4;
1458 outw(addrptr[1], ioaddr);
1459 ioaddr += 4;
1460 outw(addrptr[2], ioaddr);
1461 ioaddr += 4;
1462
1463 /* Clear Hash Table */
1464 for (i = 0; i < 4; i++)
1465 hash_table[i] = 0x0;
1466
1467 /* broadcast address */
1468 hash_table[3] = 0x8000;
1469
1470 /* the multicast address in Hash Table : 64 bits */
1471 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1472 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1473 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1474 }
1475
1476 /* Write the hash table to MAC MD table */
1477 for (i = 0; i < 4; i++, ioaddr += 4)
1478 outw(hash_table[i], ioaddr);
1479}
1480
1481
1482/*
1483 * Send a setup frame for DM9102/DM9102A
1484 * This setup frame initilize DM910X address filter mode
1485 */
1486
1487static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1488{
1489 struct dmfe_board_info *db = netdev_priv(dev);
1490 struct dev_mc_list *mcptr;
1491 struct tx_desc *txptr;
1492 u16 * addrptr;
1493 u32 * suptr;
1494 int i;
1495
1496 DMFE_DBUG(0, "send_filter_frame()", 0);
1497
1498 txptr = db->tx_insert_ptr;
1499 suptr = (u32 *) txptr->tx_buf_ptr;
1500
1501 /* Node address */
1502 addrptr = (u16 *) dev->dev_addr;
1503 *suptr++ = addrptr[0];
1504 *suptr++ = addrptr[1];
1505 *suptr++ = addrptr[2];
1506
1507 /* broadcast address */
1508 *suptr++ = 0xffff;
1509 *suptr++ = 0xffff;
1510 *suptr++ = 0xffff;
1511
1512 /* fit the multicast address */
1513 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1514 addrptr = (u16 *) mcptr->dmi_addr;
1515 *suptr++ = addrptr[0];
1516 *suptr++ = addrptr[1];
1517 *suptr++ = addrptr[2];
1518 }
1519
1520 for (; i<14; i++) {
1521 *suptr++ = 0xffff;
1522 *suptr++ = 0xffff;
1523 *suptr++ = 0xffff;
1524 }
1525
1526 /* prepare the setup frame */
1527 db->tx_insert_ptr = txptr->next_tx_desc;
1528 txptr->tdes1 = cpu_to_le32(0x890000c0);
1529
1530 /* Resource Check and Send the setup packet */
1531 if (!db->tx_packet_cnt) {
1532 /* Resource Empty */
1533 db->tx_packet_cnt++;
1534 txptr->tdes0 = cpu_to_le32(0x80000000);
1535 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1536 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1537 update_cr6(db->cr6_data, dev->base_addr);
1538 dev->trans_start = jiffies;
1539 } else
1540 db->tx_queue_cnt++; /* Put in TX queue */
1541}
1542
1543
1544/*
1545 * Allocate rx buffer,
1546 * As possible as allocate maxiumn Rx buffer
1547 */
1548
1549static void allocate_rx_buffer(struct dmfe_board_info *db)
1550{
1551 struct rx_desc *rxptr;
1552 struct sk_buff *skb;
1553
1554 rxptr = db->rx_insert_ptr;
1555
1556 while(db->rx_avail_cnt < RX_DESC_CNT) {
1557 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1558 break;
1559 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
f67ba792
ML
1560 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1561 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1da177e4
LT
1562 wmb();
1563 rxptr->rdes0 = cpu_to_le32(0x80000000);
1564 rxptr = rxptr->next_rx_desc;
1565 db->rx_avail_cnt++;
1566 }
1567
1568 db->rx_insert_ptr = rxptr;
1569}
1570
1571
1572/*
1573 * Read one word data from the serial ROM
1574 */
1575
1576static u16 read_srom_word(long ioaddr, int offset)
1577{
1578 int i;
1579 u16 srom_data = 0;
1580 long cr9_ioaddr = ioaddr + DCR9;
1581
1582 outl(CR9_SROM_READ, cr9_ioaddr);
1583 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1584
1585 /* Send the Read Command 110b */
1586 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1587 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1588 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1589
1590 /* Send the offset */
1591 for (i = 5; i >= 0; i--) {
1592 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1593 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1594 }
1595
1596 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1597
1598 for (i = 16; i > 0; i--) {
1599 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1600 udelay(5);
f67ba792
ML
1601 srom_data = (srom_data << 1) |
1602 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1da177e4
LT
1603 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1604 udelay(5);
1605 }
1606
1607 outl(CR9_SROM_READ, cr9_ioaddr);
1608 return srom_data;
1609}
1610
1611
1612/*
1613 * Auto sense the media mode
1614 */
1615
1616static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1617{
1618 u8 ErrFlag = 0;
1619 u16 phy_mode;
1620
1621 /* CR6 bit18=0, select 10/100M */
1622 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1623
1624 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1625 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1626
1627 if ( (phy_mode & 0x24) == 0x24 ) {
1628 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
f67ba792
ML
1629 phy_mode = phy_read(db->ioaddr,
1630 db->phy_addr, 7, db->chip_id) & 0xf000;
1da177e4 1631 else /* DM9102/DM9102A */
f67ba792
ML
1632 phy_mode = phy_read(db->ioaddr,
1633 db->phy_addr, 17, db->chip_id) & 0xf000;
1da177e4
LT
1634 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1635 switch (phy_mode) {
1636 case 0x1000: db->op_mode = DMFE_10MHF; break;
1637 case 0x2000: db->op_mode = DMFE_10MFD; break;
1638 case 0x4000: db->op_mode = DMFE_100MHF; break;
1639 case 0x8000: db->op_mode = DMFE_100MFD; break;
1640 default: db->op_mode = DMFE_10MHF;
1641 ErrFlag = 1;
1642 break;
1643 }
1644 } else {
1645 db->op_mode = DMFE_10MHF;
1646 DMFE_DBUG(0, "Link Failed :", phy_mode);
1647 ErrFlag = 1;
1648 }
1649
1650 return ErrFlag;
1651}
1652
1653
1654/*
1655 * Set 10/100 phyxcer capability
1656 * AUTO mode : phyxcer register4 is NIC capability
1657 * Force mode: phyxcer register4 is the force media
1658 */
1659
1660static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1661{
1662 u16 phy_reg;
1663
1664 /* Select 10/100M phyxcer */
1665 db->cr6_data &= ~0x40000;
1666 update_cr6(db->cr6_data, db->ioaddr);
1667
1668 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1669 if (db->chip_id == PCI_DM9009_ID) {
f67ba792
ML
1670 phy_reg = phy_read(db->ioaddr,
1671 db->phy_addr, 18, db->chip_id) & ~0x1000;
1672
1673 phy_write(db->ioaddr,
1674 db->phy_addr, 18, phy_reg, db->chip_id);
1da177e4
LT
1675 }
1676
1677 /* Phyxcer capability setting */
1678 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1679
1680 if (db->media_mode & DMFE_AUTO) {
1681 /* AUTO Mode */
1682 phy_reg |= db->PHY_reg4;
1683 } else {
1684 /* Force Mode */
1685 switch(db->media_mode) {
1686 case DMFE_10MHF: phy_reg |= 0x20; break;
1687 case DMFE_10MFD: phy_reg |= 0x40; break;
1688 case DMFE_100MHF: phy_reg |= 0x80; break;
1689 case DMFE_100MFD: phy_reg |= 0x100; break;
1690 }
1691 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1692 }
1693
1694 /* Write new capability to Phyxcer Reg4 */
1695 if ( !(phy_reg & 0x01e0)) {
1696 phy_reg|=db->PHY_reg4;
1697 db->media_mode|=DMFE_AUTO;
1698 }
1699 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1700
1701 /* Restart Auto-Negotiation */
1702 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1703 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1704 if ( !db->chip_type )
1705 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1706}
1707
1708
1709/*
1710 * Process op-mode
1711 * AUTO mode : PHY controller in Auto-negotiation Mode
1712 * Force mode: PHY controller in force mode with HUB
1713 * N-way force capability with SWITCH
1714 */
1715
1716static void dmfe_process_mode(struct dmfe_board_info *db)
1717{
1718 u16 phy_reg;
1719
1720 /* Full Duplex Mode Check */
1721 if (db->op_mode & 0x4)
1722 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1723 else
1724 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1725
1726 /* Transciver Selection */
1727 if (db->op_mode & 0x10) /* 1M HomePNA */
1728 db->cr6_data |= 0x40000;/* External MII select */
1729 else
1730 db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */
1731
1732 update_cr6(db->cr6_data, db->ioaddr);
1733
1734 /* 10/100M phyxcer force mode need */
1735 if ( !(db->media_mode & 0x18)) {
1736 /* Forece Mode */
1737 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1738 if ( !(phy_reg & 0x1) ) {
1739 /* parter without N-Way capability */
1740 phy_reg = 0x0;
1741 switch(db->op_mode) {
1742 case DMFE_10MHF: phy_reg = 0x0; break;
1743 case DMFE_10MFD: phy_reg = 0x100; break;
1744 case DMFE_100MHF: phy_reg = 0x2000; break;
1745 case DMFE_100MFD: phy_reg = 0x2100; break;
1746 }
f67ba792
ML
1747 phy_write(db->ioaddr,
1748 db->phy_addr, 0, phy_reg, db->chip_id);
1da177e4
LT
1749 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1750 mdelay(20);
f67ba792
ML
1751 phy_write(db->ioaddr,
1752 db->phy_addr, 0, phy_reg, db->chip_id);
1da177e4
LT
1753 }
1754 }
1755}
1756
1757
1758/*
1759 * Write a word to Phy register
1760 */
1761
f67ba792
ML
1762static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1763 u16 phy_data, u32 chip_id)
1da177e4
LT
1764{
1765 u16 i;
1766 unsigned long ioaddr;
1767
1768 if (chip_id == PCI_DM9132_ID) {
1769 ioaddr = iobase + 0x80 + offset * 4;
1770 outw(phy_data, ioaddr);
1771 } else {
1772 /* DM9102/DM9102A Chip */
1773 ioaddr = iobase + DCR9;
1774
1775 /* Send 33 synchronization clock to Phy controller */
1776 for (i = 0; i < 35; i++)
1777 phy_write_1bit(ioaddr, PHY_DATA_1);
1778
1779 /* Send start command(01) to Phy */
1780 phy_write_1bit(ioaddr, PHY_DATA_0);
1781 phy_write_1bit(ioaddr, PHY_DATA_1);
1782
1783 /* Send write command(01) to Phy */
1784 phy_write_1bit(ioaddr, PHY_DATA_0);
1785 phy_write_1bit(ioaddr, PHY_DATA_1);
1786
1787 /* Send Phy address */
1788 for (i = 0x10; i > 0; i = i >> 1)
f67ba792
ML
1789 phy_write_1bit(ioaddr,
1790 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1da177e4
LT
1791
1792 /* Send register address */
1793 for (i = 0x10; i > 0; i = i >> 1)
f67ba792
ML
1794 phy_write_1bit(ioaddr,
1795 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1da177e4
LT
1796
1797 /* written trasnition */
1798 phy_write_1bit(ioaddr, PHY_DATA_1);
1799 phy_write_1bit(ioaddr, PHY_DATA_0);
1800
1801 /* Write a word data to PHY controller */
1802 for ( i = 0x8000; i > 0; i >>= 1)
f67ba792
ML
1803 phy_write_1bit(ioaddr,
1804 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1da177e4
LT
1805 }
1806}
1807
1808
1809/*
1810 * Read a word data from phy register
1811 */
1812
1813static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1814{
1815 int i;
1816 u16 phy_data;
1817 unsigned long ioaddr;
1818
1819 if (chip_id == PCI_DM9132_ID) {
1820 /* DM9132 Chip */
1821 ioaddr = iobase + 0x80 + offset * 4;
1822 phy_data = inw(ioaddr);
1823 } else {
1824 /* DM9102/DM9102A Chip */
1825 ioaddr = iobase + DCR9;
1826
1827 /* Send 33 synchronization clock to Phy controller */
1828 for (i = 0; i < 35; i++)
1829 phy_write_1bit(ioaddr, PHY_DATA_1);
1830
1831 /* Send start command(01) to Phy */
1832 phy_write_1bit(ioaddr, PHY_DATA_0);
1833 phy_write_1bit(ioaddr, PHY_DATA_1);
1834
1835 /* Send read command(10) to Phy */
1836 phy_write_1bit(ioaddr, PHY_DATA_1);
1837 phy_write_1bit(ioaddr, PHY_DATA_0);
1838
1839 /* Send Phy address */
1840 for (i = 0x10; i > 0; i = i >> 1)
f67ba792
ML
1841 phy_write_1bit(ioaddr,
1842 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1da177e4
LT
1843
1844 /* Send register address */
1845 for (i = 0x10; i > 0; i = i >> 1)
f67ba792
ML
1846 phy_write_1bit(ioaddr,
1847 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1da177e4
LT
1848
1849 /* Skip transition state */
1850 phy_read_1bit(ioaddr);
1851
1852 /* read 16bit data */
1853 for (phy_data = 0, i = 0; i < 16; i++) {
1854 phy_data <<= 1;
1855 phy_data |= phy_read_1bit(ioaddr);
1856 }
1857 }
1858
1859 return phy_data;
1860}
1861
1862
1863/*
1864 * Write one bit data to Phy Controller
1865 */
1866
1867static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1868{
1869 outl(phy_data, ioaddr); /* MII Clock Low */
1870 udelay(1);
1871 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1872 udelay(1);
1873 outl(phy_data, ioaddr); /* MII Clock Low */
1874 udelay(1);
1875}
1876
1877
1878/*
1879 * Read one bit phy data from PHY controller
1880 */
1881
1882static u16 phy_read_1bit(unsigned long ioaddr)
1883{
1884 u16 phy_data;
1885
1886 outl(0x50000, ioaddr);
1887 udelay(1);
1888 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1889 outl(0x40000, ioaddr);
1890 udelay(1);
1891
1892 return phy_data;
1893}
1894
1895
1896/*
1897 * Parser SROM and media mode
1898 */
1899
1900static void dmfe_parse_srom(struct dmfe_board_info * db)
1901{
1902 char * srom = db->srom;
1903 int dmfe_mode, tmp_reg;
1904
1905 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1906
1907 /* Init CR15 */
1908 db->cr15_data = CR15_DEFAULT;
1909
1910 /* Check SROM Version */
1911 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1912 /* SROM V4.01 */
1913 /* Get NIC support media mode */
16b110c3 1914 db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
1da177e4
LT
1915 db->PHY_reg4 = 0;
1916 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1917 switch( db->NIC_capability & tmp_reg ) {
1918 case 0x1: db->PHY_reg4 |= 0x0020; break;
1919 case 0x2: db->PHY_reg4 |= 0x0040; break;
1920 case 0x4: db->PHY_reg4 |= 0x0080; break;
1921 case 0x8: db->PHY_reg4 |= 0x0100; break;
1922 }
1923 }
1924
1925 /* Media Mode Force or not check */
16b110c3
AM
1926 dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
1927 le32_to_cpup((__le32 *)srom + 36/4);
1da177e4
LT
1928 switch(dmfe_mode) {
1929 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1930 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1931 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1932 case 0x100:
1933 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1934 }
1935
1936 /* Special Function setting */
1937 /* VLAN function */
1938 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1939 db->cr15_data |= 0x40;
1940
1941 /* Flow Control */
1942 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1943 db->cr15_data |= 0x400;
1944
1945 /* TX pause packet */
1946 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1947 db->cr15_data |= 0x9800;
1948 }
1949
1950 /* Parse HPNA parameter */
1951 db->HPNA_command = 1;
1952
1953 /* Accept remote command or not */
1954 if (HPNA_rx_cmd == 0)
1955 db->HPNA_command |= 0x8000;
1956
1957 /* Issue remote command & operation mode */
1958 if (HPNA_tx_cmd == 1)
1959 switch(HPNA_mode) { /* Issue Remote Command */
1960 case 0: db->HPNA_command |= 0x0904; break;
1961 case 1: db->HPNA_command |= 0x0a00; break;
1962 case 2: db->HPNA_command |= 0x0506; break;
1963 case 3: db->HPNA_command |= 0x0602; break;
1964 }
1965 else
1966 switch(HPNA_mode) { /* Don't Issue */
1967 case 0: db->HPNA_command |= 0x0004; break;
1968 case 1: db->HPNA_command |= 0x0000; break;
1969 case 2: db->HPNA_command |= 0x0006; break;
1970 case 3: db->HPNA_command |= 0x0002; break;
1971 }
1972
1973 /* Check DM9801 or DM9802 present or not */
1974 db->HPNA_present = 0;
1975 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1976 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1977 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1978 /* DM9801 or DM9802 present */
1979 db->HPNA_timer = 8;
1980 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1981 /* DM9801 HomeRun */
1982 db->HPNA_present = 1;
1983 dmfe_program_DM9801(db, tmp_reg);
1984 } else {
1985 /* DM9802 LongRun */
1986 db->HPNA_present = 2;
1987 dmfe_program_DM9802(db);
1988 }
1989 }
1990
1991}
1992
1993
1994/*
1995 * Init HomeRun DM9801
1996 */
1997
1998static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1999{
2000 uint reg17, reg25;
2001
2002 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2003 switch(HPNA_rev) {
2004 case 0xb900: /* DM9801 E3 */
2005 db->HPNA_command |= 0x1000;
2006 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2007 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2008 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2009 break;
2010 case 0xb901: /* DM9801 E4 */
2011 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2012 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2013 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2014 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2015 break;
2016 case 0xb902: /* DM9801 E5 */
2017 case 0xb903: /* DM9801 E6 */
2018 default:
2019 db->HPNA_command |= 0x1000;
2020 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2021 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2022 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2023 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2024 break;
2025 }
2026 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2027 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2028 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2029}
2030
2031
2032/*
2033 * Init HomeRun DM9802
2034 */
2035
2036static void dmfe_program_DM9802(struct dmfe_board_info * db)
2037{
2038 uint phy_reg;
2039
2040 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2041 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2042 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2043 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2044 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2045}
2046
2047
2048/*
2049 * Check remote HPNA power and speed status. If not correct,
2050 * issue command again.
2051*/
2052
2053static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2054{
2055 uint phy_reg;
2056
2057 /* Got remote device status */
2058 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2059 switch(phy_reg) {
2060 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2061 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2062 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2063 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2064 }
2065
2066 /* Check remote device status match our setting ot not */
2067 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
f67ba792
ML
2068 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2069 db->chip_id);
1da177e4
LT
2070 db->HPNA_timer=8;
2071 } else
2072 db->HPNA_timer=600; /* Match, every 10 minutes, check */
2073}
2074
2075
2076
2077static struct pci_device_id dmfe_pci_tbl[] = {
2078 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2079 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2080 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2081 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2082 { 0, }
2083};
2084MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2085
2086
bc8a8387
ML
#ifdef CONFIG_PM
/*
 * PCI suspend handler: detach the interface, quiesce the chip, free
 * the RX buffers, arm Wake-on-LAN according to db->wol_mode, then
 * save PCI state and power the device down.
 *
 * Fix: pci_save_state() must be called BEFORE pci_set_power_state();
 * saving after the device has been put into a low-power state records
 * the wrong configuration for resume.
 */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, dev->base_addr);

	/* Mask interrupts and ack anything pending */
	outl(0, dev->base_addr + DCR7);
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Arm the requested wake events (config dword 0x40) */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Save config state while still in D0, then power down */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

/*
 * PCI resume handler: restore PCI state, re-initialise the chip,
 * disarm Wake-on-LAN and re-attach the interface.
 */
static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	u32 tmp;

	pci_restore_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D0);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart upper layer interface */
	netif_device_attach(dev);

	return 0;
}
#else
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif
2158
1da177e4
LT
2159static struct pci_driver dmfe_driver = {
2160 .name = "dmfe",
2161 .id_table = dmfe_pci_tbl,
2162 .probe = dmfe_init_one,
2163 .remove = __devexit_p(dmfe_remove_one),
bc8a8387
ML
2164 .suspend = dmfe_suspend,
2165 .resume = dmfe_resume
1da177e4
LT
2166};
2167
2168MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2169MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2170MODULE_LICENSE("GPL");
2171MODULE_VERSION(DRV_VERSION);
2172
2173module_param(debug, int, 0);
2174module_param(mode, byte, 0);
2175module_param(cr6set, int, 0);
2176module_param(chkmode, byte, 0);
2177module_param(HPNA_mode, byte, 0);
2178module_param(HPNA_rx_cmd, byte, 0);
2179module_param(HPNA_tx_cmd, byte, 0);
2180module_param(HPNA_NoiseFloor, byte, 0);
2181module_param(SF_mode, byte, 0);
2182MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
f67ba792
ML
2183MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2184 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2185
2186MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2187 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
1da177e4
LT
2188
2189/* Description:
2190 * when user used insmod to add module, system invoked init_module()
2191 * to initilize and register.
2192 */
2193
2194static int __init dmfe_init_module(void)
2195{
2196 int rc;
2197
2198 printk(version);
2199 printed_version = 1;
2200
2201 DMFE_DBUG(0, "init_module() ", debug);
2202
2203 if (debug)
2204 dmfe_debug = debug; /* set debug flag */
2205 if (cr6set)
2206 dmfe_cr6_user_set = cr6set;
2207
2208 switch(mode) {
2209 case DMFE_10MHF:
2210 case DMFE_100MHF:
2211 case DMFE_10MFD:
2212 case DMFE_100MFD:
2213 case DMFE_1M_HPNA:
2214 dmfe_media_mode = mode;
2215 break;
2216 default:dmfe_media_mode = DMFE_AUTO;
2217 break;
2218 }
2219
2220 if (HPNA_mode > 4)
2221 HPNA_mode = 0; /* Default: LP/HS */
2222 if (HPNA_rx_cmd > 1)
2223 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2224 if (HPNA_tx_cmd > 1)
2225 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2226 if (HPNA_NoiseFloor > 15)
2227 HPNA_NoiseFloor = 0;
2228
29917620 2229 rc = pci_register_driver(&dmfe_driver);
1da177e4
LT
2230 if (rc < 0)
2231 return rc;
2232
2233 return 0;
2234}
2235
2236
/*
 * Description:
 * When the user removes the module with rmmod, the kernel invokes this
 * exit handler to unregister all registered services.
 */
2242
2243static void __exit dmfe_cleanup_module(void)
2244{
2245 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2246 pci_unregister_driver(&dmfe_driver);
2247}
2248
2249module_init(dmfe_init_module);
2250module_exit(dmfe_cleanup_module);