2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/string.h>
17 #include <linux/timer.h>
18 #include <linux/proc_fs.h>
19 #include <linux/init.h>
20 #include <linux/spinlock.h>
21 #include <linux/crc32.h>
22 #include <linux/crc32poly.h>
23 #include <linux/bitrev.h>
24 #include <linux/ethtool.h>
25 #include <linux/slab.h>
27 #include <asm/dbdma.h>
30 #include <asm/pgtable.h>
31 #include <asm/machdep.h>
32 #include <asm/pmac_feature.h>
33 #include <asm/macio.h>
38 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
39 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
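/*
 * Illustrative sketch, not part of the original driver: bracketing an
 * arbitrary buffer on page boundaries with the two macros above.  The
 * function and parameter names here are hypothetical.
 */
static inline void bmac_page_bounds(void *buf, unsigned long len,
				    void **first, void **last)
{
	*first = trunc_page(buf);			/* page containing the first byte */
	*last = round_page((char *)buf + len);		/* end rounded up to a page boundary */
}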
41 /* Compile-time switch: use the multicast code lifted from the sunhme driver */
42 #define SUNHME_MULTICAST
46 #define MAX_TX_ACTIVE 1
48 #define ETHERMINPACKET 64
50 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) /* MTU + Ethernet header + FCS + 2 bytes slack */
51 #define TX_TIMEOUT HZ /* 1 second */
53 /* Bits in transmit DMA status */
54 #define TX_DMA_ERR 0x80
59 /* volatile struct bmac *bmac; */
60 struct sk_buff_head *queue;
61 volatile struct dbdma_regs __iomem *tx_dma;
63 volatile struct dbdma_regs __iomem *rx_dma;
65 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
66 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
67 struct macio_dev *mdev;
69 struct sk_buff *rx_bufs[N_RX_RING];
72 struct sk_buff *tx_bufs[N_TX_RING];
75 unsigned char tx_fullup;
76 struct timer_list tx_timeout;
80 unsigned short hash_use_count[64];
81 unsigned short hash_table_mask[4];
85 #if 0 /* Move that to ethtool */
87 typedef struct bmac_reg_entry {
89 unsigned short reg_offset;
92 #define N_REG_ENTRIES 31
94 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
96 {"MEMDATAHI", MEMDATAHI},
97 {"MEMDATALO", MEMDATALO},
130 static unsigned char *bmac_emergency_rxbuf;
133 * Number of bytes of private data per BMAC: allow enough for
134 * the rx and tx dma commands plus a branch dma command each,
135 * and another 16 bytes to allow us to align the dma command
136 * buffers on a 16 byte boundary.
138 #define PRIV_BYTES (sizeof(struct bmac_data) \
139 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
140 + sizeof(struct sk_buff_head))
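/*
 * Hedged sketch of how this private area gets carved up; it mirrors what
 * bmac_probe() does further down and is shown here only to make the
 * PRIV_BYTES sizing concrete.  The extra "+ 1" command in each list is the
 * branch command that closes the ring.
 */
static inline void bmac_carve_priv_area(struct bmac_data *bp)
{
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
}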
142 static int bmac_open(struct net_device *dev);
143 static int bmac_close(struct net_device *dev);
144 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
145 static void bmac_set_multicast(struct net_device *dev);
146 static void bmac_reset_and_enable(struct net_device *dev);
147 static void bmac_start_chip(struct net_device *dev);
148 static void bmac_init_chip(struct net_device *dev);
149 static void bmac_init_registers(struct net_device *dev);
150 static void bmac_enable_and_reset_chip(struct net_device *dev);
151 static int bmac_set_address(struct net_device *dev, void *addr);
152 static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
153 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
154 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
155 static void bmac_set_timeout(struct net_device *dev);
156 static void bmac_tx_timeout(struct timer_list *t);
157 static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
158 static void bmac_start(struct net_device *dev);
160 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
161 #define DBDMA_CLEAR(x) ( (x) << 16)
164 dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
166 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
169 static inline unsigned long
170 dbdma_ld32(volatile __u32 __iomem *a)
173 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
178 dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
180 dbdma_st32(&dmap->control,
181 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
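/*
 * DBDMA control-register writes carry a bit mask in the upper 16 bits and
 * the new values in the lower 16 bits; only masked bits change.  That is
 * what DBDMA_SET() and DBDMA_CLEAR() build, so the write above means "set
 * RUN and WAKE, clear PAUSE and DEAD, leave everything else alone".
 * Illustrative sketch using the same pattern (not a helper this driver
 * defines): pausing a channel and waiting for it to go idle.
 */
static inline void
dbdma_pause(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control, DBDMA_SET(PAUSE));
	while (dbdma_ld32(&dmap->status) & ACTIVE)
		udelay(1);
}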
186 dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
188 dbdma_st32(&dmap->control,
189 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
191 while (dbdma_ld32(&dmap->status) & RUN)
196 dbdma_setcmd(volatile struct dbdma_cmd *cp,
197 unsigned short cmd, unsigned count, unsigned long addr,
198 unsigned long cmd_dep)
200 out_le16(&cp->command, cmd);
201 out_le16(&cp->req_count, count);
202 out_le32(&cp->phy_addr, addr);
203 out_le32(&cp->cmd_dep, cmd_dep);
204 out_le16(&cp->xfer_status, 0);
205 out_le16(&cp->res_count, 0);
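/*
 * The descriptor fields are stored little-endian (the DBDMA engine's byte
 * order), hence the out_le16()/out_le32() stores above and the matching
 * le16_to_cpu() reads in the rx/tx interrupt handlers.  Those handlers
 * treat a set ACTIVE bit in the written-back xfer_status as "this command
 * has completed"; a hedged sketch of that test as a helper (the driver
 * itself open-codes it):
 */
static inline int
dbdma_cmd_done(volatile struct dbdma_cmd *cp)
{
	return (le16_to_cpu(cp->xfer_status) & ACTIVE) != 0;
}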
209 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
211 out_le16((void __iomem *)dev->base_addr + reg_offset, data);
216 unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
218 return in_le16((void __iomem *)dev->base_addr + reg_offset);
222 bmac_enable_and_reset_chip(struct net_device *dev)
224 struct bmac_data *bp = netdev_priv(dev);
225 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
226 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
233 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
236 #define MIFDELAY udelay(10)
239 bmac_mif_readbits(struct net_device *dev, int nb)
241 unsigned int val = 0;
244 bmwrite(dev, MIFCSR, 0);
246 if (bmread(dev, MIFCSR) & 8)
248 bmwrite(dev, MIFCSR, 1);
251 bmwrite(dev, MIFCSR, 0);
253 bmwrite(dev, MIFCSR, 1);
259 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
264 b = (val & (1 << nb))? 6: 4;
265 bmwrite(dev, MIFCSR, b);
267 bmwrite(dev, MIFCSR, b|1);
273 bmac_mif_read(struct net_device *dev, unsigned int addr)
277 bmwrite(dev, MIFCSR, 4);
279 bmac_mif_writebits(dev, ~0U, 32);
280 bmac_mif_writebits(dev, 6, 4);
281 bmac_mif_writebits(dev, addr, 10);
282 bmwrite(dev, MIFCSR, 2);
284 bmwrite(dev, MIFCSR, 1);
286 val = bmac_mif_readbits(dev, 17);
287 bmwrite(dev, MIFCSR, 4);
293 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
295 bmwrite(dev, MIFCSR, 4);
297 bmac_mif_writebits(dev, ~0U, 32);
298 bmac_mif_writebits(dev, 5, 4);
299 bmac_mif_writebits(dev, addr, 10);
300 bmac_mif_writebits(dev, 2, 2);
301 bmac_mif_writebits(dev, val, 16);
302 bmac_mif_writebits(dev, 3, 2);
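/*
 * The MIF routines above bit-bang a standard IEEE 802.3 clause-22 MII
 * management frame: a preamble of 32 ones, a 4-bit start/opcode field
 * (6 = read, 5 = write), a 10-bit address made of the 5-bit PHY address
 * followed by the 5-bit register number, a turnaround, then 16 data bits.
 * Hedged sketch of composing that 10-bit field -- the phy/register split is
 * my reading of the protocol; the driver itself just passes "addr" through:
 */
static inline unsigned int
bmac_mif_phyreg(unsigned int phy, unsigned int reg)
{
	return ((phy & 0x1f) << 5) | (reg & 0x1f);
}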
306 bmac_init_registers(struct net_device *dev)
308 struct bmac_data *bp = netdev_priv(dev);
309 volatile unsigned short regValue;
310 unsigned short *pWord16;
313 /* XXDEBUG(("bmac: enter init_registers\n")); */
315 bmwrite(dev, RXRST, RxResetValue);
316 bmwrite(dev, TXRST, TxResetBit);
322 regValue = bmread(dev, TXRST); /* wait for the reset bit to clear (acknowledge) */
323 } while ((regValue & TxResetBit) && i > 0);
325 if (!bp->is_bmac_plus) {
326 regValue = bmread(dev, XCVRIF);
327 regValue |= ClkBit | SerialMode | COLActiveLow;
328 bmwrite(dev, XCVRIF, regValue);
332 bmwrite(dev, RSEED, (unsigned short)0x1968);
334 regValue = bmread(dev, XIFC);
335 regValue |= TxOutputEnable;
336 bmwrite(dev, XIFC, regValue);
340 /* set collision counters to 0 */
341 bmwrite(dev, NCCNT, 0);
342 bmwrite(dev, NTCNT, 0);
343 bmwrite(dev, EXCNT, 0);
344 bmwrite(dev, LTCNT, 0);
346 /* set rx counters to 0 */
347 bmwrite(dev, FRCNT, 0);
348 bmwrite(dev, LECNT, 0);
349 bmwrite(dev, AECNT, 0);
350 bmwrite(dev, FECNT, 0);
351 bmwrite(dev, RXCV, 0);
353 /* set tx fifo information */
354 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
356 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
357 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
359 /* set rx fifo information */
360 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
361 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
363 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
364 bmread(dev, STATUS); /* read it just to clear it */
366 /* zero out the chip Hash Filter registers */
367 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
368 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
369 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
370 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
371 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
373 pWord16 = (unsigned short *)dev->dev_addr;
374 bmwrite(dev, MADD0, *pWord16++);
375 bmwrite(dev, MADD1, *pWord16++);
376 bmwrite(dev, MADD2, *pWord16);
378 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
380 bmwrite(dev, INTDISABLE, EnableNormal);
385 bmac_disable_interrupts(struct net_device *dev)
387 bmwrite(dev, INTDISABLE, DisableAll);
391 bmac_enable_interrupts(struct net_device *dev)
393 bmwrite(dev, INTDISABLE, EnableNormal);
399 bmac_start_chip(struct net_device *dev)
401 struct bmac_data *bp = netdev_priv(dev);
402 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
403 unsigned short oldConfig;
405 /* enable rx dma channel */
408 oldConfig = bmread(dev, TXCFG);
409 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
411 /* turn on rx plus any other bits already on (promiscuous possibly) */
412 oldConfig = bmread(dev, RXCFG);
413 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
418 bmac_init_phy(struct net_device *dev)
421 struct bmac_data *bp = netdev_priv(dev);
423 printk(KERN_DEBUG "phy registers:");
424 for (addr = 0; addr < 32; ++addr) {
427 printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
429 printk(KERN_CONT "\n");
431 if (bp->is_bmac_plus) {
432 unsigned int capable, ctrl;
434 ctrl = bmac_mif_read(dev, 0);
435 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
436 if (bmac_mif_read(dev, 4) != capable ||
437 (ctrl & 0x1000) == 0) {
438 bmac_mif_write(dev, 4, capable);
439 bmac_mif_write(dev, 0, 0x1200);
441 bmac_mif_write(dev, 0, 0x1000);
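/*
 * Hedged decoding of the MII constants used just above (the symbolic names
 * are mine; the driver uses raw numbers): register 0 is the PHY control
 * register, register 1 the status register and register 4 the
 * autonegotiation advertisement register.  Writing 0x1200 to the control
 * register enables and restarts autonegotiation; 0x1000 leaves
 * autonegotiation enabled without restarting it.  The "capable" value
 * shifts the ability bits (status register bits 15:11) down into the
 * advertisement bit positions and ORs in the 802.3 selector field.
 */
#define BMAC_MII_CONTROL	0
#define BMAC_MII_STATUS		1
#define BMAC_MII_ADVERTISE	4
#define BMAC_MII_CTL_ANENABLE	0x1000	/* enable autonegotiation */
#define BMAC_MII_CTL_ANRESTART	0x0200	/* restart autonegotiation */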
445 static void bmac_init_chip(struct net_device *dev)
448 bmac_init_registers(dev);
452 static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
454 struct net_device* dev = macio_get_drvdata(mdev);
455 struct bmac_data *bp = netdev_priv(dev);
457 unsigned short config;
460 netif_device_detach(dev);
461 /* probably should wait for the DMA to finish and turn off the chip */
462 spin_lock_irqsave(&bp->lock, flags);
463 if (bp->timeout_active) {
464 del_timer(&bp->tx_timeout);
465 bp->timeout_active = 0;
467 disable_irq(dev->irq);
468 disable_irq(bp->tx_dma_intr);
469 disable_irq(bp->rx_dma_intr);
471 spin_unlock_irqrestore(&bp->lock, flags);
473 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
474 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
476 config = bmread(dev, RXCFG);
477 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
478 config = bmread(dev, TXCFG);
479 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
480 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
481 /* disable rx and tx dma */
482 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
483 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
484 /* free some skb's */
485 for (i=0; i<N_RX_RING; i++) {
486 if (bp->rx_bufs[i] != NULL) {
487 dev_kfree_skb(bp->rx_bufs[i]);
488 bp->rx_bufs[i] = NULL;
491 for (i = 0; i<N_TX_RING; i++) {
492 if (bp->tx_bufs[i] != NULL) {
493 dev_kfree_skb(bp->tx_bufs[i]);
494 bp->tx_bufs[i] = NULL;
498 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
502 static int bmac_resume(struct macio_dev *mdev)
504 struct net_device* dev = macio_get_drvdata(mdev);
505 struct bmac_data *bp = netdev_priv(dev);
507 /* see if this is enough */
509 bmac_reset_and_enable(dev);
511 enable_irq(dev->irq);
512 enable_irq(bp->tx_dma_intr);
513 enable_irq(bp->rx_dma_intr);
514 netif_device_attach(dev);
518 #endif /* CONFIG_PM */
520 static int bmac_set_address(struct net_device *dev, void *addr)
522 struct bmac_data *bp = netdev_priv(dev);
523 unsigned char *p = addr;
524 unsigned short *pWord16;
528 XXDEBUG(("bmac: enter set_address\n"));
529 spin_lock_irqsave(&bp->lock, flags);
531 for (i = 0; i < 6; ++i) {
532 dev->dev_addr[i] = p[i];
534 /* load up the hardware address */
535 pWord16 = (unsigned short *)dev->dev_addr;
536 bmwrite(dev, MADD0, *pWord16++);
537 bmwrite(dev, MADD1, *pWord16++);
538 bmwrite(dev, MADD2, *pWord16);
540 spin_unlock_irqrestore(&bp->lock, flags);
541 XXDEBUG(("bmac: exit set_address\n"));
545 static inline void bmac_set_timeout(struct net_device *dev)
547 struct bmac_data *bp = netdev_priv(dev);
550 spin_lock_irqsave(&bp->lock, flags);
551 if (bp->timeout_active)
552 del_timer(&bp->tx_timeout);
553 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
554 add_timer(&bp->tx_timeout);
555 bp->timeout_active = 1;
556 spin_unlock_irqrestore(&bp->lock, flags);
560 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
568 baddr = virt_to_bus(vaddr);
570 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
574 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
576 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
578 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
579 virt_to_bus(addr), 0);
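/*
 * Both constructors above hand the DBDMA engine a bus address obtained with
 * virt_to_bus().  A hedged sketch of the same step written against the
 * generic DMA API (an assumption about how it could look, not what this
 * driver does; it would also need <linux/dma-mapping.h> and a
 * dma_mapping_error() check):
 */
static inline void
bmac_construct_rxbuff_mapped(struct device *dmadev, struct sk_buff *skb,
			     volatile struct dbdma_cmd *cp)
{
	dma_addr_t baddr = dma_map_single(dmadev, skb->data, RX_BUFLEN,
					  DMA_FROM_DEVICE);

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, baddr, 0);
}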
583 bmac_init_tx_ring(struct bmac_data *bp)
585 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
587 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
593 /* put a branch at the end of the tx command list */
594 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
595 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
599 out_le32(&td->wait_sel, 0x00200020);
600 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
604 bmac_init_rx_ring(struct net_device *dev)
606 struct bmac_data *bp = netdev_priv(dev);
607 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
611 /* initialize list of sk_buffs for receiving and set up recv dma */
612 memset((char *)bp->rx_cmds, 0,
613 (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
614 for (i = 0; i < N_RX_RING; i++) {
615 if ((skb = bp->rx_bufs[i]) == NULL) {
616 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
620 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
626 /* Put a branch back to the beginning of the receive command list */
627 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
628 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
632 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
638 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
640 struct bmac_data *bp = netdev_priv(dev);
641 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
644 /* see if there's a free slot in the tx ring */
645 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
646 /* bp->tx_empty, bp->tx_fill)); */
650 if (i == bp->tx_empty) {
651 netif_stop_queue(dev);
653 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
654 return -1; /* can't take it at the moment */
657 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
659 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
661 bp->tx_bufs[bp->tx_fill] = skb;
664 dev->stats.tx_bytes += skb->len;
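/*
 * Hedged sketch of the ring accounting used above: one slot is always kept
 * unused so that "fill + 1 == empty" can mean "full", and a DBDMA STOP
 * command is parked in that next slot so the channel halts after the last
 * real transmit command.
 */
static inline int
bmac_tx_ring_full(struct bmac_data *bp)
{
	int next = bp->tx_fill + 1;

	if (next >= N_TX_RING)
		next = 0;
	return next == bp->tx_empty;
}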
671 static int rxintcount;
673 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
675 struct net_device *dev = (struct net_device *) dev_id;
676 struct bmac_data *bp = netdev_priv(dev);
677 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
678 volatile struct dbdma_cmd *cp;
681 unsigned int residual;
685 spin_lock_irqsave(&bp->lock, flags);
687 if (++rxintcount < 10) {
688 XXDEBUG(("bmac_rxdma_intr\n"));
695 cp = &bp->rx_cmds[i];
696 stat = le16_to_cpu(cp->xfer_status);
697 residual = le16_to_cpu(cp->res_count);
698 if ((stat & ACTIVE) == 0)
700 nb = RX_BUFLEN - residual - 2;
701 if (nb < (ETHERMINPACKET - ETHERCRC)) {
703 dev->stats.rx_length_errors++;
704 dev->stats.rx_errors++;
706 skb = bp->rx_bufs[i];
707 bp->rx_bufs[i] = NULL;
712 skb->protocol = eth_type_trans(skb, dev);
714 ++dev->stats.rx_packets;
715 dev->stats.rx_bytes += nb;
717 ++dev->stats.rx_dropped;
719 if ((skb = bp->rx_bufs[i]) == NULL) {
720 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
722 skb_reserve(bp->rx_bufs[i], 2);
724 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
725 cp->res_count = cpu_to_le16(0);
726 cp->xfer_status = cpu_to_le16(0);
728 if (++i >= N_RX_RING) i = 0;
737 spin_unlock_irqrestore(&bp->lock, flags);
739 if (rxintcount < 10) {
740 XXDEBUG(("bmac_rxdma_intr done\n"));
745 static int txintcount;
747 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
749 struct net_device *dev = (struct net_device *) dev_id;
750 struct bmac_data *bp = netdev_priv(dev);
751 volatile struct dbdma_cmd *cp;
755 spin_lock_irqsave(&bp->lock, flags);
757 if (txintcount++ < 10) {
758 XXDEBUG(("bmac_txdma_intr\n"));
761 /* del_timer(&bp->tx_timeout); */
762 /* bp->timeout_active = 0; */
765 cp = &bp->tx_cmds[bp->tx_empty];
766 stat = le16_to_cpu(cp->xfer_status);
767 if (txintcount < 10) {
768 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
770 if (!(stat & ACTIVE)) {
772 * status field might not have been filled by DBDMA
774 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
778 if (bp->tx_bufs[bp->tx_empty]) {
779 ++dev->stats.tx_packets;
780 dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
782 bp->tx_bufs[bp->tx_empty] = NULL;
784 netif_wake_queue(dev);
785 if (++bp->tx_empty >= N_TX_RING)
787 if (bp->tx_empty == bp->tx_fill)
791 spin_unlock_irqrestore(&bp->lock, flags);
793 if (txintcount < 10) {
794 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
801 #ifndef SUNHME_MULTICAST
802 /* Real fast bit-reversal algorithm, 6-bit values */
803 static int reverse6[64] = {
804 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
805 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
806 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
807 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
808 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
809 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
810 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
811 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
815 crc416(unsigned int curval, unsigned short nxtval)
817 register unsigned int counter, cur = curval, next = nxtval;
818 register int high_crc_set, low_data_set;
821 next = ((next & 0x00FF) << 8) | (next >> 8);
823 /* Compute bit-by-bit */
824 for (counter = 0; counter < 16; ++counter) {
825 /* is high CRC bit set? */
826 if ((cur & 0x80000000) == 0) high_crc_set = 0;
827 else high_crc_set = 1;
831 if ((next & 0x0001) == 0) low_data_set = 0;
832 else low_data_set = 1;
837 if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
843 bmac_crc(unsigned short *address)
847 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
848 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
849 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
850 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
856 * Add requested mcast addr to BMac's hash table filter.
861 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
866 if (!(*addr)) return;
867 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
868 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
869 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
871 mask = (unsigned char)1 << mask;
872 bp->hash_table_mask[crc/16] |= mask;
876 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
881 /* Now, delete the address from the filter copy, as indicated */
882 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
883 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
884 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
885 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
887 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
888 bp->hash_table_mask[crc/16] &= mask;
892 * Sync the adapter with the software copy of the multicast mask
893 * (logical address filter).
897 bmac_rx_off(struct net_device *dev)
899 unsigned short rx_cfg;
901 rx_cfg = bmread(dev, RXCFG);
902 rx_cfg &= ~RxMACEnable;
903 bmwrite(dev, RXCFG, rx_cfg);
905 rx_cfg = bmread(dev, RXCFG);
906 } while (rx_cfg & RxMACEnable);
910 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
912 unsigned short rx_cfg;
914 rx_cfg = bmread(dev, RXCFG);
915 rx_cfg |= RxMACEnable;
916 if (hash_enable) rx_cfg |= RxHashFilterEnable;
917 else rx_cfg &= ~RxHashFilterEnable;
918 if (promisc_enable) rx_cfg |= RxPromiscEnable;
919 else rx_cfg &= ~RxPromiscEnable;
920 bmwrite(dev, RXRST, RxResetValue);
921 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
922 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
923 bmwrite(dev, RXCFG, rx_cfg );
928 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
930 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
931 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
932 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
933 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
938 bmac_add_multi(struct net_device *dev,
939 struct bmac_data *bp, unsigned char *addr)
941 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
942 bmac_addhash(bp, addr);
944 bmac_update_hash_table_mask(dev, bp);
945 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
946 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
950 bmac_remove_multi(struct net_device *dev,
951 struct bmac_data *bp, unsigned char *addr)
953 bmac_removehash(bp, addr);
955 bmac_update_hash_table_mask(dev, bp);
956 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
960 /* Set or clear the multicast filter for this adaptor.
961 num_addrs == -1 Promiscuous mode, receive all packets
962 num_addrs == 0 Normal mode, clear multicast list
963 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
964 best-effort filtering.
966 static void bmac_set_multicast(struct net_device *dev)
968 struct netdev_hw_addr *ha;
969 struct bmac_data *bp = netdev_priv(dev);
970 int num_addrs = netdev_mc_count(dev);
971 unsigned short rx_cfg;
977 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
979 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
980 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
981 bmac_update_hash_table_mask(dev, bp);
982 rx_cfg = bmac_rx_on(dev, 1, 0);
983 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
984 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
985 rx_cfg = bmread(dev, RXCFG);
986 rx_cfg |= RxPromiscEnable;
987 bmwrite(dev, RXCFG, rx_cfg);
988 rx_cfg = bmac_rx_on(dev, 0, 1);
989 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
991 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
992 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
993 if (num_addrs == 0) {
994 rx_cfg = bmac_rx_on(dev, 0, 0);
995 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
997 netdev_for_each_mc_addr(ha, dev)
998 bmac_addhash(bp, ha->addr);
999 bmac_update_hash_table_mask(dev, bp);
1000 rx_cfg = bmac_rx_on(dev, 1, 0);
1001 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1004 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1006 #else /* ifdef SUNHME_MULTICAST */
1008 /* The version of set_multicast below was lifted from sunhme.c */
1010 static void bmac_set_multicast(struct net_device *dev)
1012 struct netdev_hw_addr *ha;
1013 unsigned short rx_cfg;
1016 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1017 bmwrite(dev, BHASH0, 0xffff);
1018 bmwrite(dev, BHASH1, 0xffff);
1019 bmwrite(dev, BHASH2, 0xffff);
1020 bmwrite(dev, BHASH3, 0xffff);
1021 } else if(dev->flags & IFF_PROMISC) {
1022 rx_cfg = bmread(dev, RXCFG);
1023 rx_cfg |= RxPromiscEnable;
1024 bmwrite(dev, RXCFG, rx_cfg);
1026 u16 hash_table[4] = { 0 };
1028 rx_cfg = bmread(dev, RXCFG);
1029 rx_cfg &= ~RxPromiscEnable;
1030 bmwrite(dev, RXCFG, rx_cfg);
1032 netdev_for_each_mc_addr(ha, dev) {
1033 crc = ether_crc_le(6, ha->addr);
1035 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1037 bmwrite(dev, BHASH0, hash_table[0]);
1038 bmwrite(dev, BHASH1, hash_table[1]);
1039 bmwrite(dev, BHASH2, hash_table[2]);
1040 bmwrite(dev, BHASH3, hash_table[3]);
1043 #endif /* SUNHME_MULTICAST */
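/*
 * Hedged sketch of the hash mapping used by the sunhme-derived path above
 * (it assumes the usual ">> 26" reduction of the CRC): the top six bits of
 * the little-endian CRC of the address select one of 64 filter bits; bits
 * 5:4 of that value pick one of the four 16-bit words written to
 * BHASH0..BHASH3 and bits 3:0 the bit within that word.
 */
static inline unsigned int
bmac_multicast_hash_bin(const unsigned char *addr)
{
	return ether_crc_le(ETH_ALEN, addr) >> 26;	/* 0..63 */
}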
1045 static int miscintcount;
1047 static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
1049 struct net_device *dev = (struct net_device *) dev_id;
1050 unsigned int status = bmread(dev, STATUS);
1051 if (miscintcount++ < 10) {
1052 XXDEBUG(("bmac_misc_intr\n"));
1054 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1055 /* bmac_txdma_intr_inner(irq, dev_id); */
1056 /* if (status & FrameReceived) dev->stats.rx_dropped++; */
1057 if (status & RxErrorMask) dev->stats.rx_errors++;
1058 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
1059 if (status & RxLenCntExp) dev->stats.rx_length_errors++;
1060 if (status & RxOverFlow) dev->stats.rx_over_errors++;
1061 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
1063 /* if (status & FrameSent) dev->stats.tx_dropped++; */
1064 if (status & TxErrorMask) dev->stats.tx_errors++;
1065 if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
1066 if (status & TxNormalCollExp) dev->stats.collisions++;
1071 * Procedures for reading the serial EEPROM (SROM)
1073 #define SROMAddressLength 5
1074 #define DataInOn 0x0008
1075 #define DataInOff 0x0000
1077 #define ChipSelect 0x0001
1078 #define SDIShiftCount 3
1079 #define SD0ShiftCount 2
1080 #define DelayValue 1000 /* number of microseconds */
1081 #define SROMStartOffset 10 /* this is in words */
1082 #define SROMReadCount 3 /* number of words to read from SROM */
1083 #define SROMAddressBits 6
1084 #define EnetAddressOffset 20
1086 static unsigned char
1087 bmac_clock_out_bit(struct net_device *dev)
1089 unsigned short data;
1092 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1095 data = bmread(dev, SROMCSR);
1097 val = (data >> SD0ShiftCount) & 1;
1099 bmwrite(dev, SROMCSR, ChipSelect);
1106 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1108 unsigned short data;
1110 if (val != 0 && val != 1) return;
1112 data = (val << SDIShiftCount);
1113 bmwrite(dev, SROMCSR, data | ChipSelect );
1116 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1119 bmwrite(dev, SROMCSR, data | ChipSelect);
1124 reset_and_select_srom(struct net_device *dev)
1127 bmwrite(dev, SROMCSR, 0);
1130 /* send it the read command (110) */
1131 bmac_clock_in_bit(dev, 1);
1132 bmac_clock_in_bit(dev, 1);
1133 bmac_clock_in_bit(dev, 0);
1136 static unsigned short
1137 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1139 unsigned short data, val;
1142 /* send out the address we want to read from */
1143 for (i = 0; i < addr_len; i++) {
1144 val = addr >> (addr_len-i-1);
1145 bmac_clock_in_bit(dev, val & 1);
1148 /* Now read in the 16-bit data */
1150 for (i = 0; i < 16; i++) {
1151 val = bmac_clock_out_bit(dev);
1155 bmwrite(dev, SROMCSR, 0);
1161 * It looks like Cogent and SMC use different methods for calculating
1162 * checksums. What a pain.
1166 bmac_verify_checksum(struct net_device *dev)
1168 unsigned short data, storedCS;
1170 reset_and_select_srom(dev);
1171 data = read_srom(dev, 3, SROMAddressBits);
1172 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
1179 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1182 unsigned short data;
1184 for (i = 0; i < 3; i++)
1186 reset_and_select_srom(dev);
1187 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1188 ea[2*i] = bitrev8(data & 0x0ff);
1189 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
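/*
 * Illustrative sketch, not part of the driver: reading back the same three
 * station-address words with the SROM helpers above.  Each word needs a
 * fresh reset_and_select_srom() because every read transaction sends the
 * command and address from scratch.
 */
static void bmac_dump_station_srom(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		reset_and_select_srom(dev);
		printk(KERN_DEBUG "bmac: SROM word %u = %#06x\n", i,
		       read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits));
	}
}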
1193 static void bmac_reset_and_enable(struct net_device *dev)
1195 struct bmac_data *bp = netdev_priv(dev);
1196 unsigned long flags;
1197 struct sk_buff *skb;
1198 unsigned char *data;
1200 spin_lock_irqsave(&bp->lock, flags);
1201 bmac_enable_and_reset_chip(dev);
1202 bmac_init_tx_ring(bp);
1203 bmac_init_rx_ring(dev);
1204 bmac_init_chip(dev);
1205 bmac_start_chip(dev);
1206 bmwrite(dev, INTDISABLE, EnableNormal);
1210 * It seems that the bmac can't receive until it's transmitted
1211 * a packet. So we give it a dummy packet to transmit.
1213 skb = netdev_alloc_skb(dev, ETHERMINPACKET);
1215 data = skb_put_zero(skb, ETHERMINPACKET);
1216 memcpy(data, dev->dev_addr, ETH_ALEN);
1217 memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
1218 bmac_transmit_packet(skb, dev);
1220 spin_unlock_irqrestore(&bp->lock, flags);
1223 static const struct ethtool_ops bmac_ethtool_ops = {
1224 .get_link = ethtool_op_get_link,
1227 static const struct net_device_ops bmac_netdev_ops = {
1228 .ndo_open = bmac_open,
1229 .ndo_stop = bmac_close,
1230 .ndo_start_xmit = bmac_output,
1231 .ndo_set_rx_mode = bmac_set_multicast,
1232 .ndo_set_mac_address = bmac_set_address,
1233 .ndo_validate_addr = eth_validate_addr,
1236 static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
1239 struct bmac_data *bp;
1240 const unsigned char *prop_addr;
1241 unsigned char addr[6];
1242 struct net_device *dev;
1243 int is_bmac_plus = ((int)match->data) != 0;
1245 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
1246 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
1249 prop_addr = of_get_property(macio_get_of_node(mdev),
1250 "mac-address", NULL);
1251 if (prop_addr == NULL) {
1252 prop_addr = of_get_property(macio_get_of_node(mdev),
1253 "local-mac-address", NULL);
1254 if (prop_addr == NULL) {
1255 printk(KERN_ERR "BMAC: Can't get mac-address\n");
1259 memcpy(addr, prop_addr, sizeof(addr));
1261 dev = alloc_etherdev(PRIV_BYTES);
1265 bp = netdev_priv(dev);
1266 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
1267 macio_set_drvdata(mdev, dev);
1270 spin_lock_init(&bp->lock);
1272 if (macio_request_resources(mdev, "bmac")) {
1273 printk(KERN_ERR "BMAC: can't request IO resource!\n");
1277 dev->base_addr = (unsigned long)
1278 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
1279 if (dev->base_addr == 0)
1282 dev->irq = macio_irq(mdev, 0);
1284 bmac_enable_and_reset_chip(dev);
1285 bmwrite(dev, INTDISABLE, DisableAll);
1287 rev = addr[0] == 0 && addr[1] == 0xA0;
1288 for (j = 0; j < 6; ++j)
1289 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
1291 /* Enable chip without interrupts for now */
1292 bmac_enable_and_reset_chip(dev);
1293 bmwrite(dev, INTDISABLE, DisableAll);
1295 dev->netdev_ops = &bmac_netdev_ops;
1296 dev->ethtool_ops = &bmac_ethtool_ops;
1298 bmac_get_station_address(dev, addr);
1299 if (bmac_verify_checksum(dev) != 0)
1300 goto err_out_iounmap;
1302 bp->is_bmac_plus = is_bmac_plus;
1303 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
1305 goto err_out_iounmap;
1306 bp->tx_dma_intr = macio_irq(mdev, 1);
1307 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
1309 goto err_out_iounmap_tx;
1310 bp->rx_dma_intr = macio_irq(mdev, 2);
1312 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1313 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1315 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1316 skb_queue_head_init(bp->queue);
1318 timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
1320 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1322 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1323 goto err_out_iounmap_rx;
1325 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1327 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
1330 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1332 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
1336 /* Mask chip interrupts and disable the chip; they will be
1337 * re-enabled in open()
1339 disable_irq(dev->irq);
1340 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1342 if (register_netdev(dev) != 0) {
1343 printk(KERN_ERR "BMAC: Ethernet registration failed\n");
1347 printk(KERN_INFO "%s: BMAC%s at %pM",
1348 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
1349 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1355 free_irq(bp->rx_dma_intr, dev);
1357 free_irq(bp->tx_dma_intr, dev);
1359 free_irq(dev->irq, dev);
1361 iounmap(bp->rx_dma);
1363 iounmap(bp->tx_dma);
1365 iounmap((void __iomem *)dev->base_addr);
1367 macio_release_resources(mdev);
1369 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1375 static int bmac_open(struct net_device *dev)
1377 struct bmac_data *bp = netdev_priv(dev);
1378 /* XXDEBUG(("bmac: enter open\n")); */
1379 /* reset the chip */
1381 bmac_reset_and_enable(dev);
1382 enable_irq(dev->irq);
1386 static int bmac_close(struct net_device *dev)
1388 struct bmac_data *bp = netdev_priv(dev);
1389 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1390 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1391 unsigned short config;
1396 /* disable rx and tx */
1397 config = bmread(dev, RXCFG);
1398 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1400 config = bmread(dev, TXCFG);
1401 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1403 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1405 /* disable rx and tx dma */
1406 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1407 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1409 /* free some skb's */
1410 XXDEBUG(("bmac: free rx bufs\n"));
1411 for (i=0; i<N_RX_RING; i++) {
1412 if (bp->rx_bufs[i] != NULL) {
1413 dev_kfree_skb(bp->rx_bufs[i]);
1414 bp->rx_bufs[i] = NULL;
1417 XXDEBUG(("bmac: free tx bufs\n"));
1418 for (i = 0; i<N_TX_RING; i++) {
1419 if (bp->tx_bufs[i] != NULL) {
1420 dev_kfree_skb(bp->tx_bufs[i]);
1421 bp->tx_bufs[i] = NULL;
1424 XXDEBUG(("bmac: all bufs freed\n"));
1427 disable_irq(dev->irq);
1428 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1434 bmac_start(struct net_device *dev)
1436 struct bmac_data *bp = netdev_priv(dev);
1438 struct sk_buff *skb;
1439 unsigned long flags;
1444 spin_lock_irqsave(&bp->lock, flags);
1446 i = bp->tx_fill + 1;
1449 if (i == bp->tx_empty)
1451 skb = skb_dequeue(bp->queue);
1454 bmac_transmit_packet(skb, dev);
1456 spin_unlock_irqrestore(&bp->lock, flags);
1460 bmac_output(struct sk_buff *skb, struct net_device *dev)
1462 struct bmac_data *bp = netdev_priv(dev);
1463 skb_queue_tail(bp->queue, skb);
1465 return NETDEV_TX_OK;
1468 static void bmac_tx_timeout(struct timer_list *t)
1470 struct bmac_data *bp = from_timer(bp, t, tx_timeout);
1471 struct net_device *dev = macio_get_drvdata(bp->mdev);
1472 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1473 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1474 volatile struct dbdma_cmd *cp;
1475 unsigned long flags;
1476 unsigned short config, oldConfig;
1479 XXDEBUG(("bmac: tx_timeout called\n"));
1480 spin_lock_irqsave(&bp->lock, flags);
1481 bp->timeout_active = 0;
1483 /* update various counters */
1484 /* bmac_handle_misc_intrs(bp, 0); */
1486 cp = &bp->tx_cmds[bp->tx_empty];
1487 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
1488 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
1489 /* mb->pr, mb->xmtfs, mb->fifofc)); */
1491 /* turn off both tx and rx and reset the chip */
1492 config = bmread(dev, RXCFG);
1493 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1494 config = bmread(dev, TXCFG);
1495 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1496 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1497 printk(KERN_ERR "bmac: transmit timeout - resetting\n");
1498 bmac_enable_and_reset_chip(dev);
1500 /* restart rx dma */
1501 cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
1502 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1503 out_le16(&cp->xfer_status, 0);
1504 out_le32(&rd->cmdptr, virt_to_bus(cp));
1505 out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
1507 /* fix up the transmit side */
1508 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
1509 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
1511 ++dev->stats.tx_errors;
1512 if (i != bp->tx_fill) {
1513 dev_kfree_skb(bp->tx_bufs[i]);
1514 bp->tx_bufs[i] = NULL;
1515 if (++i >= N_TX_RING) i = 0;
1519 netif_wake_queue(dev);
1520 if (i != bp->tx_fill) {
1521 cp = &bp->tx_cmds[i];
1522 out_le16(&cp->xfer_status, 0);
1523 out_le16(&cp->command, OUTPUT_LAST);
1524 out_le32(&td->cmdptr, virt_to_bus(cp));
1525 out_le32(&td->control, DBDMA_SET(RUN));
1526 /* bmac_set_timeout(dev); */
1527 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
1530 /* turn it back on */
1531 oldConfig = bmread(dev, RXCFG);
1532 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
1533 oldConfig = bmread(dev, TXCFG);
1534 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
1536 spin_unlock_irqrestore(&bp->lock, flags);
1540 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
1544 for (i=0;i< count;i++) {
1547 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
1551 le32_to_cpup(ip+3));
1559 bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1566 if (bmac_devs == NULL)
1569 len += sprintf(buffer, "BMAC counters & registers\n");
1571 for (i = 0; i<N_REG_ENTRIES; i++) {
1572 len += sprintf(buffer + len, "%s: %#08x\n",
1573 reg_entries[i].name,
1574 bmread(bmac_devs, reg_entries[i].reg_offset));
1582 if (pos > offset+length) break;
1585 *start = buffer + (offset - begin);
1586 len -= (offset - begin);
1588 if (len > length) len = length;
1594 static int bmac_remove(struct macio_dev *mdev)
1596 struct net_device *dev = macio_get_drvdata(mdev);
1597 struct bmac_data *bp = netdev_priv(dev);
1599 unregister_netdev(dev);
1601 free_irq(dev->irq, dev);
1602 free_irq(bp->tx_dma_intr, dev);
1603 free_irq(bp->rx_dma_intr, dev);
1605 iounmap((void __iomem *)dev->base_addr);
1606 iounmap(bp->tx_dma);
1607 iounmap(bp->rx_dma);
1609 macio_release_resources(mdev);
1616 static const struct of_device_id bmac_match[] =
1624 .compatible = "bmac+",
1629 MODULE_DEVICE_TABLE (of, bmac_match);
1631 static struct macio_driver bmac_driver =
1635 .owner = THIS_MODULE,
1636 .of_match_table = bmac_match,
1638 .probe = bmac_probe,
1639 .remove = bmac_remove,
1641 .suspend = bmac_suspend,
1642 .resume = bmac_resume,
1647 static int __init bmac_init(void)
1649 if (bmac_emergency_rxbuf == NULL) {
1650 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1651 if (bmac_emergency_rxbuf == NULL)
1655 return macio_register_driver(&bmac_driver);
1658 static void __exit bmac_exit(void)
1660 macio_unregister_driver(&bmac_driver);
1662 kfree(bmac_emergency_rxbuf);
1663 bmac_emergency_rxbuf = NULL;
1666 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1667 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1668 MODULE_LICENSE("GPL");
1670 module_init(bmac_init);
1671 module_exit(bmac_exit);