/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
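/*
 * buffer_size only matters when the platform does not provide dedicated
 * packet buffer memory (see ethoc_probe()); in that case a coherent DMA
 * buffer of this size is allocated instead. It can be overridden at load
 * time, e.g. (example invocation): modprobe ethoc buffer_size=16384
 */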
/* register offsets */
#define INT_SOURCE 0x04
#define PACKETLEN 0x18
#define TX_BD_NUM 0x20
#define CTRLMODER 0x24
#define MIICOMMAND 0x2c
#define MIIADDRESS 0x30
#define MIITX_DATA 0x34
#define MIIRX_DATA 0x38
#define MIISTATUS 0x3c
#define MAC_ADDR0 0x40
#define MAC_ADDR1 0x44
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
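/*
 * The buffer descriptor table follows the register file in the same I/O
 * window, starting at ETHOC_BD_BASE (0x400); see ethoc_read_bd() and
 * ethoc_write_bd() below.
 */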
#define MODER_RXEN (1 << 0) /* receive enable */
#define MODER_TXEN (1 << 1) /* transmit enable */
#define MODER_NOPRE (1 << 2) /* no preamble */
#define MODER_BRO (1 << 3) /* broadcast address */
#define MODER_IAM (1 << 4) /* individual address mode */
#define MODER_PRO (1 << 5) /* promiscuous mode */
#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
#define MODER_LOOP (1 << 7) /* loopback */
#define MODER_NBO (1 << 8) /* no back-off */
#define MODER_EDE (1 << 9) /* excess defer enable */
#define MODER_FULLD (1 << 10) /* full duplex */
#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
#define MODER_DCRC (1 << 12) /* delayed CRC enable */
#define MODER_CRC (1 << 13) /* CRC enable */
#define MODER_HUGE (1 << 14) /* huge packets enable */
#define MODER_PAD (1 << 15) /* padding enabled */
#define MODER_RSM (1 << 16) /* receive small packets */
/* interrupt source and mask registers */
#define INT_MASK_TXF (1 << 0) /* transmit frame */
#define INT_MASK_TXE (1 << 1) /* transmit error */
#define INT_MASK_RXF (1 << 2) /* receive frame */
#define INT_MASK_RXE (1 << 3) /* receive error */
#define INT_MASK_BUSY (1 << 4)
#define INT_MASK_TXC (1 << 5) /* transmit control frame */
#define INT_MASK_RXC (1 << 6) /* receive control frame */

#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
        INT_MASK_TXF | INT_MASK_TXE | \
        INT_MASK_RXF | INT_MASK_RXE | \
        INT_MASK_TXC | INT_MASK_RXC | \
        INT_MASK_BUSY \
)

/* packet length register */
#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
                                     PACKETLEN_MAX(max))
/* transmit buffer number register */
#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
#define MIIMODER_NOPRE (1 << 8) /* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN (1 << 0) /* scan status */
#define MIICOMMAND_READ (1 << 1) /* read status */
#define MIICOMMAND_WRITE (1 << 2) /* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
                                   MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x) ((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x) ((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL (1 << 0)
#define MIISTATUS_BUSY (1 << 1)
#define MIISTATUS_INVALID (1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS (1 << 0) /* carrier sense lost */
#define TX_BD_DF (1 << 1) /* defer indication */
#define TX_BD_LC (1 << 2) /* late collision */
#define TX_BD_RL (1 << 3) /* retransmission limit */
#define TX_BD_RETRY_MASK (0x00f0)
#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
#define TX_BD_UR (1 << 8) /* transmitter underrun */
#define TX_BD_CRC (1 << 11) /* TX CRC enable */
#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
#define TX_BD_WRAP (1 << 13)
#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
#define TX_BD_READY (1 << 15) /* TX buffer ready */
#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK (0xffff << 16)

#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
                     TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
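/*
 * Any of the TX_BD_STATS bits left set by the MAC marks the frame as
 * errored; ethoc_update_tx_stats() maps them onto the corresponding
 * net_device statistics.
 */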
/* RX buffer descriptor */
#define RX_BD_LC (1 << 0) /* late collision */
#define RX_BD_CRC (1 << 1) /* RX CRC error */
#define RX_BD_SF (1 << 2) /* short frame */
#define RX_BD_TL (1 << 3) /* too long */
#define RX_BD_DN (1 << 4) /* dribble nibble */
#define RX_BD_IS (1 << 5) /* invalid symbol */
#define RX_BD_OR (1 << 6) /* receiver overrun */
#define RX_BD_MISS (1 << 7)
#define RX_BD_CF (1 << 8) /* control frame */
#define RX_BD_WRAP (1 << 13)
#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
#define RX_BD_EMPTY (1 << 15)
#define RX_BD_LEN(x) (((x) & 0xffff) << 16)

#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
                     RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
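/*
 * ethoc_update_rx_stats() inspects these bits for every completed RX
 * descriptor; ethoc_rx() only hands the frame to the stack when no error
 * was reported, otherwise the descriptor is simply recycled.
 */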
#define ETHOC_BUFSIZ 1536
#define ETHOC_ZLEN 64
#define ETHOC_BD_BASE 0x400
#define ETHOC_TIMEOUT (HZ / 2)
#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
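/*
 * ETHOC_BUFSIZ is the per-descriptor packet buffer size, ETHOC_ZLEN the
 * minimum frame length padded to on transmit, and ETHOC_BD_BASE the offset
 * of the buffer descriptor table within the register window. The two
 * timeouts are expressed in jiffies.
 */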
 * struct ethoc - driver-private device structure
 * @iobase: pointer to I/O memory region
 * @membase: pointer to buffer memory region
 * @dma_alloc: dma allocated buffer size
 * @io_region_size: I/O memory region size
 * @num_bd: number of buffer descriptors
 * @num_tx: number of send buffers
 * @cur_tx: last send buffer written
 * @dty_tx: last buffer actually sent
 * @num_rx: number of receive buffers
 * @cur_rx: current receive buffer
 * @vma: pointer to array of virtual memory addresses for buffers
 * @netdev: pointer to network device structure
 * @napi: NAPI structure
 * @msg_enable: device state flags
 * @mdio: MDIO bus for PHY access
 * @phy_id: address of attached PHY
        void __iomem *iobase;
        void __iomem *membase;
        resource_size_t io_region_size;
        struct net_device *netdev;
        struct napi_struct napi;
        struct mii_bus *mdio;
 * struct ethoc_bd - buffer descriptor
 * @stat: buffer statistics
 * @addr: physical memory address
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
                return ioread32be(dev->iobase + offset);
        return ioread32(dev->iobase + offset);

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
                iowrite32be(data, dev->iobase + offset);
        iowrite32(data, dev->iobase + offset);
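/*
 * The accessors above select big- or little-endian MMIO according to
 * priv->big_endian, which ethoc_probe() derives from platform data or the
 * device tree.
 */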
static inline void ethoc_read_bd(struct ethoc *dev, int index,
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        bd->stat = ethoc_read(dev, offset + 0);
        bd->addr = ethoc_read(dev, offset + 4);

static inline void ethoc_write_bd(struct ethoc *dev, int index,
                                  const struct ethoc_bd *bd)
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        ethoc_write(dev, offset + 0, bd->stat);
        ethoc_write(dev, offset + 4, bd->addr);

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
        u32 imask = ethoc_read(dev, INT_MASK);
        ethoc_write(dev, INT_MASK, imask);

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
        u32 imask = ethoc_read(dev, INT_MASK);
        ethoc_write(dev, INT_MASK, imask);

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
        ethoc_write(dev, INT_SOURCE, mask);

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
        u32 mode = ethoc_read(dev, MODER);
        mode |= MODER_RXEN | MODER_TXEN;
        ethoc_write(dev, MODER, mode);

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
        u32 mode = ethoc_read(dev, MODER);
        mode &= ~(MODER_RXEN | MODER_TXEN);
        ethoc_write(dev, MODER, mode);
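/*
 * ethoc_init_ring() programs TX_BD_NUM and lays out the descriptor table:
 * the first num_tx descriptors are used for transmission, the following
 * num_rx descriptors for reception, each backed by an ETHOC_BUFSIZ sized
 * slice of the packet buffer memory.
 */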
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
        ethoc_write(dev, TX_BD_NUM, dev->num_tx);

        /* setup transmission buffers */
        bd.stat = TX_BD_IRQ | TX_BD_CRC;

        for (i = 0; i < dev->num_tx; i++) {
                if (i == dev->num_tx - 1)
                        bd.stat |= TX_BD_WRAP;

                ethoc_write_bd(dev, i, &bd);
                bd.addr += ETHOC_BUFSIZ;

        bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

        for (i = 0; i < dev->num_rx; i++) {
                if (i == dev->num_rx - 1)
                        bd.stat |= RX_BD_WRAP;

                ethoc_write_bd(dev, dev->num_tx + i, &bd);
                bd.addr += ETHOC_BUFSIZ;

                dev->vma[dev->num_tx + i] = vma;

static int ethoc_reset(struct ethoc *dev)
        /* TODO: reset controller? */

        ethoc_disable_rx_and_tx(dev);

        /* TODO: setup registers */

        /* enable FCS generation and automatic padding */
        mode = ethoc_read(dev, MODER);
        mode |= MODER_CRC | MODER_PAD;
        ethoc_write(dev, MODER, mode);

        /* set full-duplex mode */
        mode = ethoc_read(dev, MODER);
        ethoc_write(dev, MODER, mode);
        ethoc_write(dev, IPGT, 0x15);

        ethoc_ack_irq(dev, INT_MASK_ALL);
        ethoc_enable_irq(dev, INT_MASK_ALL);
        ethoc_enable_rx_and_tx(dev);
static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
        struct net_device *netdev = dev->netdev;
        unsigned int ret = 0;

        if (bd->stat & RX_BD_TL) {
                dev_err(&netdev->dev, "RX: frame too long\n");
                netdev->stats.rx_length_errors++;

        if (bd->stat & RX_BD_SF) {
                dev_err(&netdev->dev, "RX: frame too short\n");
                netdev->stats.rx_length_errors++;

        if (bd->stat & RX_BD_DN) {
                dev_err(&netdev->dev, "RX: dribble nibble\n");
                netdev->stats.rx_frame_errors++;

        if (bd->stat & RX_BD_CRC) {
                dev_err(&netdev->dev, "RX: wrong CRC\n");
                netdev->stats.rx_crc_errors++;

        if (bd->stat & RX_BD_OR) {
                dev_err(&netdev->dev, "RX: overrun\n");
                netdev->stats.rx_over_errors++;

        if (bd->stat & RX_BD_MISS)
                netdev->stats.rx_missed_errors++;

        if (bd->stat & RX_BD_LC) {
                dev_err(&netdev->dev, "RX: late collision\n");
                netdev->stats.collisions++;
static int ethoc_rx(struct net_device *dev, int limit)
        struct ethoc *priv = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {
                entry = priv->num_tx + priv->cur_rx;
                ethoc_read_bd(priv, entry, &bd);
                if (bd.stat & RX_BD_EMPTY) {
                        ethoc_ack_irq(priv, INT_MASK_RX);
                        /* If packet (interrupt) came in between checking
                         * BD_EMPTY and clearing the interrupt source, then we
                         * risk missing the packet as the RX interrupt won't
                         * trigger right away when we reenable it; hence, check
                         * BD_EMPTY here again to make sure there isn't such a
                         * packet waiting for us...
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & RX_BD_EMPTY)

                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;

                        size -= 4; /* strip the CRC */
                        skb = netdev_alloc_skb_ip_align(dev, size);
                                void *src = priv->vma[entry];
                                memcpy_fromio(skb_put(skb, size), src, size);
                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += size;
                                netif_receive_skb(skb);
                                        "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;

                /* clear the buffer descriptor so it can be reused */
                bd.stat &= ~RX_BD_STATS;
                bd.stat |= RX_BD_EMPTY;
                ethoc_write_bd(priv, entry, &bd);
                if (++priv->cur_rx == priv->num_rx)
static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
        struct net_device *netdev = dev->netdev;

        if (bd->stat & TX_BD_LC) {
                dev_err(&netdev->dev, "TX: late collision\n");
                netdev->stats.tx_window_errors++;

        if (bd->stat & TX_BD_RL) {
                dev_err(&netdev->dev, "TX: retransmit limit\n");
                netdev->stats.tx_aborted_errors++;

        if (bd->stat & TX_BD_UR) {
                dev_err(&netdev->dev, "TX: underrun\n");
                netdev->stats.tx_fifo_errors++;

        if (bd->stat & TX_BD_CS) {
                dev_err(&netdev->dev, "TX: carrier sense lost\n");
                netdev->stats.tx_carrier_errors++;

        if (bd->stat & TX_BD_STATS)
                netdev->stats.tx_errors++;

        netdev->stats.collisions += (bd->stat >> 4) & 0xf;
        netdev->stats.tx_bytes += bd->stat >> 16;
        netdev->stats.tx_packets++;
static int ethoc_tx(struct net_device *dev, int limit)
        struct ethoc *priv = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {
                entry = priv->dty_tx & (priv->num_tx-1);

                ethoc_read_bd(priv, entry, &bd);

                if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
                        ethoc_ack_irq(priv, INT_MASK_TX);
                        /* If interrupt came in between reading in the BD
                         * and clearing the interrupt source, then we risk
                         * missing the event as the TX interrupt won't trigger
                         * right away when we reenable it; hence, check the
                         * descriptor status here again to make sure there isn't such an
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & TX_BD_READY ||
                            (priv->dty_tx == priv->cur_tx))

                ethoc_update_tx_stats(priv, &bd);

        if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
                netif_wake_queue(dev);
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct ethoc *priv = netdev_priv(dev);

        /* Figure out what triggered the interrupt...
         * The tricky bit here is that the interrupt source bits get
         * set in INT_SOURCE for an event regardless of whether that
         * event is masked or not. Thus, in order to figure out what
         * triggered the interrupt, we need to remove the sources
         * for all events that are currently masked. This behaviour
         * is not particularly well documented but reasonable...
        mask = ethoc_read(priv, INT_MASK);
        pending = ethoc_read(priv, INT_SOURCE);

        if (unlikely(pending == 0))

        ethoc_ack_irq(priv, pending);

        /* We always handle the dropped packet interrupt */
        if (pending & INT_MASK_BUSY) {
                dev_dbg(&dev->dev, "packet dropped\n");
                dev->stats.rx_dropped++;

        /* Handle receive/transmit event by switching to polling */
        if (pending & (INT_MASK_TX | INT_MASK_RX)) {
                ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                napi_schedule(&priv->napi);
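                /*
                 * RX and TX interrupts stay masked from here on;
                 * ethoc_poll() re-enables them only once both rings have
                 * been drained below the NAPI budget.
                 */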
static int ethoc_get_mac_address(struct net_device *dev, void *addr)
        struct ethoc *priv = netdev_priv(dev);
        u8 *mac = (u8 *)addr;

        reg = ethoc_read(priv, MAC_ADDR0);
        mac[2] = (reg >> 24) & 0xff;
        mac[3] = (reg >> 16) & 0xff;
        mac[4] = (reg >> 8) & 0xff;
        mac[5] = (reg >> 0) & 0xff;

        reg = ethoc_read(priv, MAC_ADDR1);
        mac[0] = (reg >> 8) & 0xff;
        mac[1] = (reg >> 0) & 0xff;

static int ethoc_poll(struct napi_struct *napi, int budget)
        struct ethoc *priv = container_of(napi, struct ethoc, napi);
        int rx_work_done = 0;
        int tx_work_done = 0;

        rx_work_done = ethoc_rx(priv->netdev, budget);
        tx_work_done = ethoc_tx(priv->netdev, budget);

        if (rx_work_done < budget && tx_work_done < budget) {
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
        struct ethoc *priv = bus->priv;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

        for (i = 0; i < 5; i++) {
                u32 status = ethoc_read(priv, MIISTATUS);
                if (!(status & MIISTATUS_BUSY)) {
                        u32 data = ethoc_read(priv, MIIRX_DATA);
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                usleep_range(100, 200);

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
        struct ethoc *priv = bus->priv;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIITX_DATA, val);
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

        for (i = 0; i < 5; i++) {
                u32 stat = ethoc_read(priv, MIISTATUS);
                if (!(stat & MIISTATUS_BUSY)) {
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                usleep_range(100, 200);
static void ethoc_mdio_poll(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        bool changed = false;

        if (priv->old_link != phydev->link) {
                priv->old_link = phydev->link;

        if (priv->old_duplex != phydev->duplex) {
                priv->old_duplex = phydev->duplex;

        mode = ethoc_read(priv, MODER);
        if (phydev->duplex == DUPLEX_FULL)
                mode &= ~MODER_FULLD;
        ethoc_write(priv, MODER, mode);

        phy_print_status(phydev);

static int ethoc_mdio_probe(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phy;

        if (priv->phy_id != -1)
                phy = mdiobus_get_phy(priv->mdio, priv->phy_id);
                phy = phy_find_first(priv->mdio);

                dev_err(&dev->dev, "no PHY found\n");

        priv->old_duplex = -1;

        err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
                                 PHY_INTERFACE_MODE_GMII);
                dev_err(&dev->dev, "could not attach to PHY\n");

        phy->advertising &= ~(ADVERTISED_1000baseT_Full |
                              ADVERTISED_1000baseT_Half);
        phy->supported &= ~(SUPPORTED_1000baseT_Full |
                            SUPPORTED_1000baseT_Half);
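        /*
         * The OpenCores MAC is a 10/100 device, which is why the gigabit
         * modes advertised and supported by the attached PHY are masked
         * out above.
         */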
static int ethoc_open(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);

        ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,

        ethoc_init_ring(priv, dev->mem_start);

        if (netif_queue_stopped(dev)) {
                dev_dbg(&dev->dev, " resuming queue\n");
                netif_wake_queue(dev);
                dev_dbg(&dev->dev, " starting queue\n");
                netif_start_queue(dev);

        priv->old_duplex = -1;

        phy_start(dev->phydev);
        napi_enable(&priv->napi);

        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
                         dev->base_addr, dev->mem_start, dev->mem_end);

static int ethoc_stop(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);

        napi_disable(&priv->napi);

        phy_stop(dev->phydev);

        ethoc_disable_rx_and_tx(priv);
        free_irq(dev->irq, dev);

        if (!netif_queue_stopped(dev))
                netif_stop_queue(dev);
static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        struct ethoc *priv = netdev_priv(dev);
        struct mii_ioctl_data *mdio = if_mii(ifr);
        struct phy_device *phy = NULL;

        if (!netif_running(dev))

        if (cmd != SIOCGMIIPHY) {
                if (mdio->phy_id >= PHY_MAX_ADDR)

                phy = mdiobus_get_phy(priv->mdio, mdio->phy_id);

        return phy_mii_ioctl(phy, ifr, cmd);

static void ethoc_do_set_mac_address(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);
        unsigned char *mac = dev->dev_addr;

        ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
                                     (mac[4] << 8) | (mac[5] << 0));
        ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));

static int ethoc_set_mac_address(struct net_device *dev, void *p)
        const struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        ethoc_do_set_mac_address(dev);
static void ethoc_set_multicast_list(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);
        u32 mode = ethoc_read(priv, MODER);
        struct netdev_hw_addr *ha;
        u32 hash[2] = { 0, 0 };

        /* set loopback mode if requested */
        if (dev->flags & IFF_LOOPBACK)

        /* receive broadcast frames if requested */
        if (dev->flags & IFF_BROADCAST)

        /* enable promiscuous mode if requested */
        if (dev->flags & IFF_PROMISC)

        ethoc_write(priv, MODER, mode);

        /* receive multicast frames */
        if (dev->flags & IFF_ALLMULTI) {
                hash[0] = 0xffffffff;
                hash[1] = 0xffffffff;
                netdev_for_each_mc_addr(ha, dev) {
                        u32 crc = ether_crc(ETH_ALEN, ha->addr);
                        int bit = (crc >> 26) & 0x3f;
                        hash[bit >> 5] |= 1 << (bit & 0x1f);

        ethoc_write(priv, ETH_HASH0, hash[0]);
        ethoc_write(priv, ETH_HASH1, hash[1]);
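        /*
         * Multicast filtering uses the usual 64-bit hash: the top six bits
         * of the Ethernet CRC of each address select one bit in
         * ETH_HASH0/ETH_HASH1.
         */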
static int ethoc_change_mtu(struct net_device *dev, int new_mtu)

static void ethoc_tx_timeout(struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);
        u32 pending = ethoc_read(priv, INT_SOURCE);
                ethoc_interrupt(dev->irq, dev);

static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ethoc *priv = netdev_priv(dev);

        if (skb_put_padto(skb, ETHOC_ZLEN)) {
                dev->stats.tx_errors++;

        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;

        entry = priv->cur_tx % priv->num_tx;
        spin_lock_irq(&priv->lock);

        ethoc_read_bd(priv, entry, &bd);
        if (unlikely(skb->len < ETHOC_ZLEN))
                bd.stat |= TX_BD_PAD;
                bd.stat &= ~TX_BD_PAD;

        dest = priv->vma[entry];
        memcpy_toio(dest, skb->data, skb->len);

        bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
        bd.stat |= TX_BD_LEN(skb->len);
        ethoc_write_bd(priv, entry, &bd);

        bd.stat |= TX_BD_READY;
        ethoc_write_bd(priv, entry, &bd);

        if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
                dev_dbg(&dev->dev, "stopping queue\n");
                netif_stop_queue(dev);

        spin_unlock_irq(&priv->lock);
        skb_tx_timestamp(skb);
static int ethoc_get_regs_len(struct net_device *netdev)

static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        struct ethoc *priv = netdev_priv(dev);

        for (i = 0; i < ETH_END / sizeof(u32); ++i)
                regs_buff[i] = ethoc_read(priv, i * sizeof(u32));

static void ethoc_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
        struct ethoc *priv = netdev_priv(dev);

        ring->rx_max_pending = priv->num_bd - 1;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->tx_max_pending = priv->num_bd - 1;

        ring->rx_pending = priv->num_rx;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
        ring->tx_pending = priv->num_tx;

static int ethoc_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ring)
        struct ethoc *priv = netdev_priv(dev);

        if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
            ring->tx_pending + ring->rx_pending > priv->num_bd)
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)

        if (netif_running(dev)) {
                netif_tx_disable(dev);
                ethoc_disable_rx_and_tx(priv);
                ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                synchronize_irq(dev->irq);

        priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
        priv->num_rx = ring->rx_pending;
        ethoc_init_ring(priv, dev->mem_start);

        if (netif_running(dev)) {
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                ethoc_enable_rx_and_tx(priv);
                netif_wake_queue(dev);
const struct ethtool_ops ethoc_ethtool_ops = {
        .get_regs_len = ethoc_get_regs_len,
        .get_regs = ethoc_get_regs,
        .nway_reset = phy_ethtool_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ringparam = ethoc_get_ringparam,
        .set_ringparam = ethoc_set_ringparam,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,

static const struct net_device_ops ethoc_netdev_ops = {
        .ndo_open = ethoc_open,
        .ndo_stop = ethoc_stop,
        .ndo_do_ioctl = ethoc_ioctl,
        .ndo_set_mac_address = ethoc_set_mac_address,
        .ndo_set_rx_mode = ethoc_set_multicast_list,
        .ndo_change_mtu = ethoc_change_mtu,
        .ndo_tx_timeout = ethoc_tx_timeout,
        .ndo_start_xmit = ethoc_start_xmit,
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev: platform device
static int ethoc_probe(struct platform_device *pdev)
        struct net_device *netdev = NULL;
        struct resource *res = NULL;
        struct resource *mmio = NULL;
        struct resource *mem = NULL;
        struct ethoc *priv = NULL;
        bool random_mac = false;
        struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
        u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;

        /* allocate networking device */
        netdev = alloc_etherdev(sizeof(struct ethoc));

        SET_NETDEV_DEV(netdev, &pdev->dev);
        platform_set_drvdata(pdev, netdev);

        /* obtain I/O memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                dev_err(&pdev->dev, "cannot obtain I/O memory space\n");

        mmio = devm_request_mem_region(&pdev->dev, res->start,
                                       resource_size(res), res->name);
                dev_err(&pdev->dev, "cannot request I/O memory space\n");

        netdev->base_addr = mmio->start;

        /* obtain buffer memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                mem = devm_request_mem_region(&pdev->dev, res->start,
                                              resource_size(res), res->name);
                        dev_err(&pdev->dev, "cannot request memory space\n");

                netdev->mem_start = mem->start;
                netdev->mem_end = mem->end;

        /* obtain device IRQ number */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
                dev_err(&pdev->dev, "cannot obtain IRQ\n");

        netdev->irq = res->start;

        /* setup driver-private data */
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
        priv->dma_alloc = 0;
        priv->io_region_size = resource_size(mmio);

        priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
                                            resource_size(mmio));
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");

        if (netdev->mem_end) {
                priv->membase = devm_ioremap_nocache(&pdev->dev,
                                netdev->mem_start, resource_size(mem));
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");

                /* Allocate buffer memory */
                priv->membase = dmam_alloc_coherent(&pdev->dev,
                                buffer_size, (void *)&netdev->mem_start,
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",

                netdev->mem_end = netdev->mem_start + buffer_size;
                priv->dma_alloc = buffer_size;

        priv->big_endian = pdata ? pdata->big_endian :
                of_device_is_big_endian(pdev->dev.of_node);

        /* calculate the number of TX/RX buffers, maximum 128 supported */
        num_bd = min_t(unsigned int,
                       128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);

        priv->num_bd = num_bd;
        /* num_tx must be a power of two */
        priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
        priv->num_rx = num_bd - priv->num_tx;

        dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
                priv->num_tx, priv->num_rx);

        priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *), GFP_KERNEL);

        /* Allow the platform setup code to pass in a MAC address. */
                memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
                priv->phy_id = pdata->phy_id;

                mac = of_get_mac_address(pdev->dev.of_node);
                        memcpy(netdev->dev_addr, mac, IFHWADDRLEN);

        /* Check that the given MAC address is valid. If it isn't, read the
         * current MAC from the controller.
        if (!is_valid_ether_addr(netdev->dev_addr))
                ethoc_get_mac_address(netdev, netdev->dev_addr);

        /* Check the MAC again for validity, if it still isn't choose and
         * program a random one.
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                eth_random_addr(netdev->dev_addr);

        ethoc_do_set_mac_address(netdev);

                netdev->addr_assign_type = NET_ADDR_RANDOM;

        /* Allow the platform setup code to adjust MII management bus clock. */
                struct clk *clk = devm_clk_get(&pdev->dev, NULL);

                        clk_prepare_enable(clk);
                        eth_clkfreq = clk_get_rate(clk);

                u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
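                /*
                 * MDC may not run faster than 2.5 MHz, so the divider is
                 * derived from the core clock rate (eth_clkfreq);
                 * MIIMODER_CLKDIV() keeps the result even, as the hardware
                 * requires.
                 */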
                dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
                ethoc_write(priv, MIIMODER,
                            (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |

        /* register MII bus */
        priv->mdio = mdiobus_alloc();

        priv->mdio->name = "ethoc-mdio";
        snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
                 priv->mdio->name, pdev->id);
        priv->mdio->read = ethoc_mdio_read;
        priv->mdio->write = ethoc_mdio_write;
        priv->mdio->priv = priv;

        ret = mdiobus_register(priv->mdio);
                dev_err(&netdev->dev, "failed to register MDIO bus\n");

        ret = ethoc_mdio_probe(netdev);
                dev_err(&netdev->dev, "failed to probe MDIO bus\n");

        /* setup the net_device structure */
        netdev->netdev_ops = &ethoc_netdev_ops;
        netdev->watchdog_timeo = ETHOC_TIMEOUT;
        netdev->features |= 0;
        netdev->ethtool_ops = &ethoc_ethtool_ops;

        netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
        spin_lock_init(&priv->lock);

        ret = register_netdev(netdev);
                dev_err(&netdev->dev, "failed to register interface\n");

        netif_napi_del(&priv->napi);

        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);

        clk_disable_unprepare(priv->clk);

        free_netdev(netdev);

 * ethoc_remove - shutdown OpenCores ethernet MAC
 * @pdev: platform device
static int ethoc_remove(struct platform_device *pdev)
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ethoc *priv = netdev_priv(netdev);

        netif_napi_del(&priv->napi);
        phy_disconnect(netdev->phydev);

        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);

        clk_disable_unprepare(priv->clk);
        unregister_netdev(netdev);
        free_netdev(netdev);

static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)

static int ethoc_resume(struct platform_device *pdev)

# define ethoc_suspend NULL
# define ethoc_resume NULL

static const struct of_device_id ethoc_match[] = {
        { .compatible = "opencores,ethoc", },

MODULE_DEVICE_TABLE(of, ethoc_match);

static struct platform_driver ethoc_driver = {
        .probe = ethoc_probe,
        .remove = ethoc_remove,
        .suspend = ethoc_suspend,
        .resume = ethoc_resume,
        .of_match_table = ethoc_match,

module_platform_driver(ethoc_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
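/*
 * Illustrative (not normative) device tree node matched by this table;
 * the addresses and sizes below are made up for the example:
 *
 *        ethernet@92000000 {
 *                compatible = "opencores,ethoc";
 *                reg = <0x92000000 0x800>;
 *                interrupts = <5>;
 *        };
 */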