// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs. Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>

#include <asm/dbdma.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define MAX_TX_ACTIVE	1
#define ETHERMINPACKET	64
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */
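/*
 * A receive buffer therefore holds a maximal frame with its 14-byte header
 * and its CRC (the chip is told not to strip it, see RxCRCNoStrip below)
 * plus a little slack; the rx ring code allocates RX_BUFLEN + 2 and uses
 * skb_reserve(skb, 2) so the IP header ends up 4-byte aligned.
 */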
/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	volatile struct dbdma_regs __iomem *rx_dma;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	struct sk_buff *rx_bufs[N_RX_RING];
	struct sk_buff *tx_bufs[N_TX_RING];
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];

#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	unsigned short reg_offset;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},

static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
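/*
 * Layout sketch of the netdev private area sized by PRIV_BYTES (see the
 * probe code below): the bmac_data structure itself, then the TX and RX
 * DBDMA command lists (tx_cmds/rx_cmds, each ring plus one extra slot for
 * the closing branch command), then the sk_buff_head used to queue
 * outgoing packets; the "+ 4" commands provide those extra slots and the
 * 16-byte alignment slack mentioned above.
 */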
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)
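/*
 * Each write to a DBDMA channel's control register carries a mask in the
 * upper 16 bits and the new bit values in the lower 16 bits: DBDMA_SET(x)
 * turns the given bits on, DBDMA_CLEAR(x) turns them off, and any bit not
 * mentioned in the mask is left unchanged by the hardware.
 */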
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));

dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));

dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	while (dbdma_ld32(&dmap->status) & RUN)

dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
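/*
 * A struct dbdma_cmd is a 16-byte descriptor the channel fetches from
 * memory: a command word (INPUT/OUTPUT/NOP plus branch and interrupt
 * modifiers), a requested byte count, the physical buffer address, a
 * command-dependent word (used here for branch targets), and the status
 * and residual-count fields the engine writes back when the command
 * completes. All fields are little-endian, hence the out_le16/out_le32
 * accessors on this big-endian machine.
 */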
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);

unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
	return in_le16((void __iomem *)dev->base_addr + reg_offset);

bmac_enable_and_reset_chip(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);

#define MIFDELAY	udelay(10)

bmac_mif_readbits(struct net_device *dev, int nb)
	unsigned int val = 0;

	bmwrite(dev, MIFCSR, 0);
	if (bmread(dev, MIFCSR) & 8)
	bmwrite(dev, MIFCSR, 1);
	bmwrite(dev, MIFCSR, 0);
	bmwrite(dev, MIFCSR, 1);

bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
	b = (val & (1 << nb))? 6: 4;
	bmwrite(dev, MIFCSR, b);
	bmwrite(dev, MIFCSR, b|1);

bmac_mif_read(struct net_device *dev, unsigned int addr)
	bmwrite(dev, MIFCSR, 4);
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	bmwrite(dev, MIFCSR, 1);
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);

bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
	bmwrite(dev, MIFCSR, 4);
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
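/*
 * The MIFCSR accesses above bit-bang a standard MII management frame to
 * the transceiver: a preamble of 32 ones, a start/opcode nibble (6 = read,
 * 5 = write), 10 bits of PHY and register address, a turnaround, and
 * 16 data bits. From the code, MIFCSR bit 0 appears to be the management
 * clock, bit 1 the outgoing data bit, bit 2 its output enable, and bit 3
 * (tested with & 8 in bmac_mif_readbits) the data read back.
 */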
bmac_init_registers(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);
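	/*
	 * The 48-bit station address is programmed as three 16-bit words
	 * (MADD0..MADD2); bmac_set_address() below repeats the same three
	 * writes when the address is changed at run time.
	 */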
	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);

bmac_disable_interrupts(struct net_device *dev)
	bmwrite(dev, INTDISABLE, DisableAll);

bmac_enable_interrupts(struct net_device *dev)
	bmwrite(dev, INTDISABLE, EnableNormal);

bmac_start_chip(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short oldConfig;

	/* enable rx dma channel */

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );

bmac_init_phy(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
			bmac_mif_write(dev, 0, 0x1000);
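/*
 * Plausible reading of the MII poking above, assuming a standard PHY
 * register layout: register 1 is the status register, so "capable" maps
 * its ability bits (15:11) onto advertisement bits 9:5 plus the 802.3
 * selector field (| 1); register 4 is the autonegotiation advertisement
 * register; and writing 0x1200 / 0x1000 to register 0 (BMCR) enables and
 * restarts autonegotiation, or merely enables it when the advertisement
 * was already correct.
 */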
static void bmac_init_chip(struct net_device *dev)
	bmac_init_registers(dev);

static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned short config;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	spin_unlock_irqrestore(&bp->lock, flags);

	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
	/* disable rx and tx dma */
	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	/* free some skb's */
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

static int bmac_resume(struct macio_dev *mdev)
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));

static inline void bmac_set_timeout(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);

bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);

bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);

bmac_init_tx_ring(struct bmac_data *bp)
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));

bmac_init_rx_ring(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
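/*
 * Both rings are laid out as circular DBDMA programs: N_RX_RING (or
 * N_TX_RING) data commands followed by one DBDMA_NOP with BR_ALWAYS that
 * branches back to the first command, so the channel keeps looping over
 * the ring. cmdptr is pointed at the start of the list, and the driver's
 * fill/empty indices track which slots currently own an sk_buff.
 */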
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;

	dev->stats.tx_bytes += skb->len;

static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned int residual;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));

		cp = &bp->rx_cmds[i];
		stat = le16_to_cpu(cp->xfer_status);
		residual = le16_to_cpu(cp->res_count);
		if ((stat & ACTIVE) == 0)
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
			skb->protocol = eth_type_trans(skb, dev);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
			++dev->stats.rx_dropped;
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
				skb_reserve(bp->rx_bufs[i], 2);
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		cp->res_count = cpu_to_le16(0);
		cp->xfer_status = cpu_to_le16(0);
		if (++i >= N_RX_RING) i = 0;

	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));

	/* del_timer(&bp->tx_timeout); */
	/* bp->timeout_active = 0; */

		cp = &bp->tx_cmds[bp->tx_empty];
		stat = le16_to_cpu(cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
		bp->tx_bufs[bp->tx_empty] = NULL;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
		if (bp->tx_empty == bp->tx_fill)

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));

#ifndef SUNHME_MULTICAST

/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
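/*
 * Everything from here down to the matching #else is compiled out in this
 * driver, since SUNHME_MULTICAST is defined above; it is the original
 * BMAC hash-filter implementation. reverse6[] bit-reverses a 6-bit value
 * and is used to turn the 6 CRC bits computed below into the bit index
 * the chip's 64-bit hash filter appears to expect.
 */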
crc416(unsigned int curval, unsigned short nxtval)
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;

bmac_crc(unsigned short *address)
	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

/*
 * Add requested mcast addr to BMac's hash table filter.
 */
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f;	/* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	/* This bit is already set */
	mask = (unsigned char)1 << mask;
	bp->hash_use_count[crc/16] |= mask;

bmac_removehash(struct bmac_data *bp, unsigned char *addr)
	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f;	/* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return;	/* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = ((unsigned char)1 << mask) ^ 0xffff;	/* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;

/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

bmac_rx_off(struct net_device *dev)
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);

bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );

bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */

bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
	bmac_removehash(bp, addr);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);

/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
	struct netdev_hw_addr *ha;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
			netdev_for_each_mc_addr(ha, dev)
				bmac_addhash(bp, ha->addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */

#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
	struct netdev_hw_addr *ha;
	unsigned short rx_cfg;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		u16 hash_table[4] = { 0 };

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
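/*
 * Same idea, sunhme-style: each multicast address is hashed with
 * ether_crc_le() and one of 64 bits spread across the four 16-bit BHASH
 * registers is set. The CRC is presumably reduced to its top 6 bits first
 * (as in sunhme), so that crc >> 4 selects one of the four hash words and
 * crc & 0xf the bit within it.
 */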
#endif /* SUNHME_MULTICAST */

static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/* bmac_txdma_intr_inner(irq, dev_id); */
	/* if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/* if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
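/*
 * The station-address SROM looks like a small MicroWire-style serial
 * EEPROM that is bit-banged through SROMCSR: ChipSelect and a clock bit
 * are toggled while data is shifted out on the SDI bit and read back on
 * the SD0 bit, one bit per clock (see bmac_clock_in_bit and
 * bmac_clock_out_bit below).
 */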
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
	unsigned short data;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);

	data = bmread(dev, SROMCSR);

	val = (data >> SD0ShiftCount) & 1;
	bmwrite(dev, SROMCSR, ChipSelect);

bmac_clock_in_bit(struct net_device *dev, unsigned int val)
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	bmwrite(dev, SROMCSR, data | ChipSelect);

reset_and_select_srom(struct net_device *dev)
	bmwrite(dev, SROMCSR, 0);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
	unsigned short data, val;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);

	/* Now read in the 16-bit data */
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);

	bmwrite(dev, SROMCSR, 0);

/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */
bmac_verify_checksum(struct net_device *dev)
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

bmac_get_station_address(struct net_device *dev, unsigned char *ea)
	unsigned short data;
	/* three 16-bit SROM words cover the 6-byte station address */
	for (i = 0; i < 3; i++)
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
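/*
 * The address bytes appear to be stored bit-reversed in the SROM
 * (starting at word offset EnetAddressOffset/2 = 10), so each byte is
 * run through bitrev8() before being used as the station address.
 */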
static void bmac_reset_and_enable(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(dev);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet. So we give it a dummy packet to transmit.
	 */
	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
		data = skb_put_zero(skb, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, ETH_ALEN);
		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
		bmac_transmit_packet(skb, dev);
	spin_unlock_irqrestore(&bp->lock, flags);

static const struct ethtool_ops bmac_ethtool_ops = {
	.get_link		= ethtool_op_get_link,

static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_rx_mode	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_validate_addr	= eth_validate_addr,

static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));

	free_irq(bp->rx_dma_intr, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(dev->irq, dev);
	iounmap(bp->rx_dma);
	iounmap(bp->tx_dma);
	iounmap((void __iomem *)dev->base_addr);
	macio_release_resources(mdev);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

static int bmac_open(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */

	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);

static int bmac_close(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
	XXDEBUG(("bmac: all bufs freed\n"));

	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

bmac_start(struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
		i = bp->tx_fill + 1;
		if (i == bp->tx_empty)
		skb = skb_dequeue(bp->queue);
		bmac_transmit_packet(skb, dev);
	spin_unlock_irqrestore(&bp->lock, flags);

bmac_output(struct sk_buff *skb, struct net_device *dev)
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	return NETDEV_TX_OK;
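/*
 * Transmit path in two stages: bmac_output() (the ndo_start_xmit hook)
 * only queues the skb on bp->queue and presumably calls bmac_start(),
 * which, under the driver lock, dequeues packets and hands them to
 * bmac_transmit_packet() for as long as the DBDMA tx ring has a free
 * slot.
 */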
static void bmac_tx_timeout(struct timer_list *t)
	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
	struct net_device *dev = macio_get_drvdata(bp->mdev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/* bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);

static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
	for (i=0;i< count;i++) {
		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       le32_to_cpup(ip+3));

bmac_proc_info(char *buffer, char **start, off_t offset, int length)
	if (bmac_devs == NULL)

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));

		if (pos > offset+length) break;

	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if (len > length) len = length;

static int bmac_remove(struct macio_dev *mdev)
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

static const struct of_device_id bmac_match[] =
	.compatible	= "bmac+",
MODULE_DEVICE_TABLE (of, bmac_match);

static struct macio_driver bmac_driver =
	.owner		= THIS_MODULE,
	.of_match_table	= bmac_match,
	.probe		= bmac_probe,
	.remove		= bmac_remove,
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,

static int __init bmac_init(void)
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL)

	return macio_register_driver(&bmac_driver);

static void __exit bmac_exit(void)
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);