2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5 * Right now, I am very wasteful with the buffers. I allocate memory
6 * pages and then divide them into 2K frame buffers. This way I know I
7 * have buffers large enough to hold one frame within one buffer descriptor.
8 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
9 * will be much more memory efficient and will easily handle lots of small frames.
12 * Much better multiple PHY support by Magnus Damm.
13 * Copyright (c) 2000 Ericsson Radio Systems AB.
15 * Support for FEC controller of ColdFire processors.
16 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19 * Copyright (c) 2004-2006 Macq Electronique SA.
21 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/string.h>
27 #include <linux/ptrace.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
40 #include <linux/tcp.h>
41 #include <linux/udp.h>
42 #include <linux/icmp.h>
43 #include <linux/spinlock.h>
44 #include <linux/workqueue.h>
45 #include <linux/bitops.h>
47 #include <linux/irq.h>
48 #include <linux/clk.h>
49 #include <linux/platform_device.h>
50 #include <linux/phy.h>
51 #include <linux/fec.h>
53 #include <linux/of_device.h>
54 #include <linux/of_gpio.h>
55 #include <linux/of_mdio.h>
56 #include <linux/of_net.h>
57 #include <linux/regulator/consumer.h>
58 #include <linux/if_vlan.h>
59 #include <linux/pinctrl/consumer.h>
61 #include <asm/cacheflush.h>
65 static void set_multicast_list(struct net_device *ndev);
67 #if defined(CONFIG_ARM)
68 #define FEC_ALIGNMENT 0xf
70 #define FEC_ALIGNMENT 0x3
73 #define DRIVER_NAME "fec"
75 /* Pause frame field and FIFO threshold */
76 #define FEC_ENET_FCE (1 << 5)
77 #define FEC_ENET_RSEM_V 0x84
78 #define FEC_ENET_RSFL_V 16
79 #define FEC_ENET_RAEM_V 0x8
80 #define FEC_ENET_RAFL_V 0x8
81 #define FEC_ENET_OPD_V 0xFFF0
83 /* Controller is ENET-MAC */
84 #define FEC_QUIRK_ENET_MAC (1 << 0)
85 /* Controller needs driver to swap frame */
86 #define FEC_QUIRK_SWAP_FRAME (1 << 1)
87 /* Controller uses gasket */
88 #define FEC_QUIRK_USE_GASKET (1 << 2)
89 /* Controller has GBIT support */
90 #define FEC_QUIRK_HAS_GBIT (1 << 3)
91 /* Controller has extended buffer descriptors */
92 #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
93 /* Controller has hardware checksum support */
94 #define FEC_QUIRK_HAS_CSUM (1 << 5)
95 /* Controller has hardware vlan support */
96 #define FEC_QUIRK_HAS_VLAN (1 << 6)
97 /* ENET IP errata ERR006358
99 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
100 * detected as not set during a prior frame transmission, then the
101 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
102 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
103 * frames not being transmitted until there is a 0-to-1 transition on ENET_TDAR[TDAR].
106 #define FEC_QUIRK_ERR006358 (1 << 7)
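/* The transmit-completion path below (fec_enet_tx()) re-arms FEC_X_DES_ACTIVE
 * whenever the ring still has queued descriptors but the register reads back
 * as zero, which keeps the transmitter going if TDAR was dropped this way.
 */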
108 static struct platform_device_id fec_devtype[] = {
110 /* keep it for coldfire */
115 .driver_data = FEC_QUIRK_USE_GASKET,
121 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
124 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
125 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
126 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
128 .name = "mvf600-fec",
129 .driver_data = FEC_QUIRK_ENET_MAC,
134 MODULE_DEVICE_TABLE(platform, fec_devtype);
137 IMX25_FEC = 1, /* runs on i.mx25/50/53 */
138 IMX27_FEC, /* runs on i.mx27/35/51 */
144 static const struct of_device_id fec_dt_ids[] = {
145 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
146 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
147 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
148 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
149 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
152 MODULE_DEVICE_TABLE(of, fec_dt_ids);
154 static unsigned char macaddr[ETH_ALEN];
155 module_param_array(macaddr, byte, NULL, 0);
156 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
158 #if defined(CONFIG_M5272)
160 * Some hardware gets its MAC address out of local flash memory.
161 * If this is non-zero then assume it is the address to get the MAC from.
163 #if defined(CONFIG_NETtel)
164 #define FEC_FLASHMAC 0xf0006006
165 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
166 #define FEC_FLASHMAC 0xf0006000
167 #elif defined(CONFIG_CANCam)
168 #define FEC_FLASHMAC 0xf0020000
169 #elif defined (CONFIG_M5272C3)
170 #define FEC_FLASHMAC (0xffe04000 + 4)
171 #elif defined(CONFIG_MOD5272)
172 #define FEC_FLASHMAC 0xffc0406b
174 #define FEC_FLASHMAC 0
176 #endif /* CONFIG_M5272 */
178 /* Interrupt events/masks. */
179 #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
180 #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
181 #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
182 #define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
183 #define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
184 #define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
185 #define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
186 #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
187 #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
188 #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
190 #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
191 #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
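/* The interrupt handler masks everything down to FEC_ENET_MII while NAPI
 * polling is scheduled; fec_enet_rx_napi() restores FEC_DEFAULT_IMASK once it
 * completes with work left under budget.
 */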
193 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
195 #define PKT_MAXBUF_SIZE 1522
196 #define PKT_MINBUF_SIZE 64
197 #define PKT_MAXBLR_SIZE 1536
199 /* FEC receive acceleration */
200 #define FEC_RACC_IPDIS (1 << 1)
201 #define FEC_RACC_PRODIS (1 << 2)
202 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
205 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
206 * size bits. Other FEC hardware does not, so we need to take that into
207 * account when setting it.
209 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
210 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
211 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
213 #define OPT_FRAME_SIZE 0
216 /* FEC MII MMFR bits definition */
217 #define FEC_MMFR_ST (1 << 30)
218 #define FEC_MMFR_OP_READ (2 << 28)
219 #define FEC_MMFR_OP_WRITE (1 << 28)
220 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
221 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
222 #define FEC_MMFR_TA (2 << 16)
223 #define FEC_MMFR_DATA(v) (v & 0xffff)
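/* Example management frame, as built by fec_enet_mdio_read() below: reading
 * register 2 of the PHY at address 1 writes
 *   FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) | FEC_MMFR_RA(2) | FEC_MMFR_TA
 * to FEC_MII_DATA and then waits for the MII interrupt to signal completion.
 */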
225 #define FEC_MII_TIMEOUT 30000 /* us */
227 /* Transmitter timeout */
228 #define TX_TIMEOUT (2 * HZ)
230 #define FEC_PAUSE_FLAG_AUTONEG 0x1
231 #define FEC_PAUSE_FLAG_ENABLE 0x2
233 #define TSO_HEADER_SIZE 128
234 /* Max number of allowed TCP segments for software TSO */
235 #define FEC_MAX_TSO_SEGS 100
236 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
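/* Rough worst-case bound: each TSO segment may need a header descriptor plus
 * a data descriptor, with extra slack for the skb's page fragments.
 */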
238 #define IS_TSO_HEADER(txq, addr) \
239 ((addr >= txq->tso_hdrs_dma) && \
240 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
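/* Ring walking helpers: step one descriptor forward or backward, using the
 * extended descriptor stride when extended buffer descriptors are in use, and
 * wrap at the end of whichever ring (TX or RX) bdp belongs to.
 */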
245 struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
247 struct bufdesc *new_bd = bdp + 1;
248 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
249 struct bufdesc_ex *ex_base;
250 struct bufdesc *base;
253 if (bdp >= fep->tx_bd_base) {
254 base = fep->tx_bd_base;
255 ring_size = fep->tx_ring_size;
256 ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
258 base = fep->rx_bd_base;
259 ring_size = fep->rx_ring_size;
260 ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
264 return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
265 ex_base : ex_new_bd);
267 return (new_bd >= (base + ring_size)) ?
272 struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
274 struct bufdesc *new_bd = bdp - 1;
275 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
276 struct bufdesc_ex *ex_base;
277 struct bufdesc *base;
280 if (bdp >= fep->tx_bd_base) {
281 base = fep->tx_bd_base;
282 ring_size = fep->tx_ring_size;
283 ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
285 base = fep->rx_bd_base;
286 ring_size = fep->rx_ring_size;
287 ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
291 return (struct bufdesc *)((ex_new_bd < ex_base) ?
292 (ex_new_bd + ring_size) : ex_new_bd);
294 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
297 static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
298 struct fec_enet_private *fep)
300 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
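/* Free TX descriptors are those between cur_tx (next to be used by the
 * driver) and dirty_tx (last one reclaimed), minus one so the pointers never
 * fully meet; a non-positive count means the span wraps, so add the ring size.
 */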
303 static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
307 entries = ((const char *)fep->dirty_tx -
308 (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
310 return entries > 0 ? entries : entries + fep->tx_ring_size;
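/* swap_buffer(): byte-swap every 32-bit word of the buffer in place, needed
 * on controllers flagged with FEC_QUIRK_SWAP_FRAME before frames are handed
 * to, or after they are received from, the hardware.
 */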
313 static void *swap_buffer(void *bufaddr, int len)
316 unsigned int *buf = bufaddr;
318 for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
319 *buf = cpu_to_be32(*buf);
324 static void fec_dump(struct net_device *ndev)
326 struct fec_enet_private *fep = netdev_priv(ndev);
327 struct bufdesc *bdp = fep->tx_bd_base;
328 unsigned int index = 0;
330 netdev_info(ndev, "TX ring dump\n");
331 pr_info("Nr SC addr len SKB\n");
334 pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
336 bdp == fep->cur_tx ? 'S' : ' ',
337 bdp == fep->dirty_tx ? 'H' : ' ',
338 bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
339 fep->tx_skbuff[index]);
340 bdp = fec_enet_get_nextdesc(bdp, fep);
342 } while (bdp != fep->tx_bd_base);
345 static inline bool is_ipv4_pkt(struct sk_buff *skb)
347 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
351 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
353 /* Only run for packets requiring a checksum. */
354 if (skb->ip_summed != CHECKSUM_PARTIAL)
357 if (unlikely(skb_cow_head(skb, 0)))
360 if (is_ipv4_pkt(skb))
361 ip_hdr(skb)->check = 0;
362 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
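/* Map each paged fragment of the skb onto consecutive TX descriptors; data
 * that is misaligned for the controller, or that must be byte-swapped, is
 * first copied through the per-descriptor tx_bounce buffer.
 */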
368 fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
370 struct fec_enet_private *fep = netdev_priv(ndev);
371 const struct platform_device_id *id_entry =
372 platform_get_device_id(fep->pdev);
373 struct bufdesc *bdp = fep->cur_tx;
374 struct bufdesc_ex *ebdp;
375 int nr_frags = skb_shinfo(skb)->nr_frags;
377 unsigned short status;
378 unsigned int estatus = 0;
379 skb_frag_t *this_frag;
385 for (frag = 0; frag < nr_frags; frag++) {
386 this_frag = &skb_shinfo(skb)->frags[frag];
387 bdp = fec_enet_get_nextdesc(bdp, fep);
388 ebdp = (struct bufdesc_ex *)bdp;
390 status = bdp->cbd_sc;
391 status &= ~BD_ENET_TX_STATS;
392 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
393 frag_len = skb_shinfo(skb)->frags[frag].size;
395 /* Handle the last BD specially */
396 if (frag == nr_frags - 1) {
397 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
398 if (fep->bufdesc_ex) {
399 estatus |= BD_ENET_TX_INT;
400 if (unlikely(skb_shinfo(skb)->tx_flags &
401 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
402 estatus |= BD_ENET_TX_TS;
406 if (fep->bufdesc_ex) {
407 if (skb->ip_summed == CHECKSUM_PARTIAL)
408 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
410 ebdp->cbd_esc = estatus;
413 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
415 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
416 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
417 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
418 memcpy(fep->tx_bounce[index], bufaddr, frag_len);
419 bufaddr = fep->tx_bounce[index];
421 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
422 swap_buffer(bufaddr, frag_len);
425 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
427 if (dma_mapping_error(&fep->pdev->dev, addr)) {
428 dev_kfree_skb_any(skb);
430 netdev_err(ndev, "Tx DMA memory map failed\n");
431 goto dma_mapping_error;
434 bdp->cbd_bufaddr = addr;
435 bdp->cbd_datlen = frag_len;
436 bdp->cbd_sc = status;
445 for (i = 0; i < frag; i++) {
446 bdp = fec_enet_get_nextdesc(bdp, fep);
447 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
448 bdp->cbd_datlen, DMA_TO_DEVICE);
453 static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
455 struct fec_enet_private *fep = netdev_priv(ndev);
456 const struct platform_device_id *id_entry =
457 platform_get_device_id(fep->pdev);
458 int nr_frags = skb_shinfo(skb)->nr_frags;
459 struct bufdesc *bdp, *last_bdp;
462 unsigned short status;
463 unsigned short buflen;
464 unsigned int estatus = 0;
469 entries_free = fec_enet_get_free_txdesc_num(fep);
470 if (entries_free < MAX_SKB_FRAGS + 1) {
471 dev_kfree_skb_any(skb);
473 netdev_err(ndev, "NOT enough BD for SG!\n");
477 /* Protocol checksum off-load for TCP and UDP. */
478 if (fec_enet_clear_csum(skb, ndev)) {
479 dev_kfree_skb_any(skb);
483 /* Fill in a Tx ring entry */
485 status = bdp->cbd_sc;
486 status &= ~BD_ENET_TX_STATS;
488 /* Set buffer length and buffer pointer */
490 buflen = skb_headlen(skb);
492 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
493 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
494 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
495 memcpy(fep->tx_bounce[index], skb->data, buflen);
496 bufaddr = fep->tx_bounce[index];
498 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
499 swap_buffer(bufaddr, buflen);
502 /* Push the data cache so the CPM does not get stale memory data. */
503 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
504 if (dma_mapping_error(&fep->pdev->dev, addr)) {
505 dev_kfree_skb_any(skb);
507 netdev_err(ndev, "Tx DMA memory map failed\n");
512 ret = fec_enet_txq_submit_frag_skb(skb, ndev);
516 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
517 if (fep->bufdesc_ex) {
518 estatus = BD_ENET_TX_INT;
519 if (unlikely(skb_shinfo(skb)->tx_flags &
520 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
521 estatus |= BD_ENET_TX_TS;
525 if (fep->bufdesc_ex) {
527 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
529 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
531 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
533 if (skb->ip_summed == CHECKSUM_PARTIAL)
534 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
537 ebdp->cbd_esc = estatus;
540 last_bdp = fep->cur_tx;
541 index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
542 /* Save skb pointer */
543 fep->tx_skbuff[index] = skb;
545 bdp->cbd_datlen = buflen;
546 bdp->cbd_bufaddr = addr;
548 /* Send it on its way. Tell FEC it's ready, interrupt when done,
549 * it's the last BD of the frame, and to put the CRC on the end.
551 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
552 bdp->cbd_sc = status;
554 /* If this was the last BD in the ring, start at the beginning again. */
555 bdp = fec_enet_get_nextdesc(last_bdp, fep);
557 skb_tx_timestamp(skb);
561 /* Trigger transmission start */
562 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
568 fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
569 struct bufdesc *bdp, int index, char *data,
570 int size, bool last_tcp, bool is_last)
572 struct fec_enet_private *fep = netdev_priv(ndev);
573 const struct platform_device_id *id_entry =
574 platform_get_device_id(fep->pdev);
575 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
576 unsigned short status;
577 unsigned int estatus = 0;
580 status = bdp->cbd_sc;
581 status &= ~BD_ENET_TX_STATS;
583 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
585 if (((unsigned long) data) & FEC_ALIGNMENT ||
586 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
587 memcpy(fep->tx_bounce[index], data, size);
588 data = fep->tx_bounce[index];
590 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
591 swap_buffer(data, size);
594 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
595 if (dma_mapping_error(&fep->pdev->dev, addr)) {
596 dev_kfree_skb_any(skb);
598 netdev_err(ndev, "Tx DMA memory map failed\n");
599 return NETDEV_TX_BUSY;
602 bdp->cbd_datlen = size;
603 bdp->cbd_bufaddr = addr;
605 if (fep->bufdesc_ex) {
606 if (skb->ip_summed == CHECKSUM_PARTIAL)
607 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
609 ebdp->cbd_esc = estatus;
612 /* Handle the last BD specially */
614 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
616 status |= BD_ENET_TX_INTR;
618 ebdp->cbd_esc |= BD_ENET_TX_INT;
621 bdp->cbd_sc = status;
627 fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
628 struct bufdesc *bdp, int index)
630 struct fec_enet_private *fep = netdev_priv(ndev);
631 const struct platform_device_id *id_entry =
632 platform_get_device_id(fep->pdev);
633 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
634 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
636 unsigned long dmabuf;
637 unsigned short status;
638 unsigned int estatus = 0;
640 status = bdp->cbd_sc;
641 status &= ~BD_ENET_TX_STATS;
642 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
644 bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
645 dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
646 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
647 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
648 memcpy(fep->tx_bounce[index], skb->data, hdr_len);
649 bufaddr = fep->tx_bounce[index];
651 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
652 swap_buffer(bufaddr, hdr_len);
654 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
655 hdr_len, DMA_TO_DEVICE);
656 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
657 dev_kfree_skb_any(skb);
659 netdev_err(ndev, "Tx DMA memory map failed\n");
660 return NETDEV_TX_BUSY;
664 bdp->cbd_bufaddr = dmabuf;
665 bdp->cbd_datlen = hdr_len;
667 if (fep->bufdesc_ex) {
668 if (skb->ip_summed == CHECKSUM_PARTIAL)
669 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
671 ebdp->cbd_esc = estatus;
674 bdp->cbd_sc = status;
679 static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
681 struct fec_enet_private *fep = netdev_priv(ndev);
682 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
683 int total_len, data_left;
684 struct bufdesc *bdp = fep->cur_tx;
686 unsigned int index = 0;
689 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
690 dev_kfree_skb_any(skb);
692 netdev_err(ndev, "NOT enough BD for TSO!\n");
696 /* Protocol checksum off-load for TCP and UDP. */
697 if (fec_enet_clear_csum(skb, ndev)) {
698 dev_kfree_skb_any(skb);
702 /* Initialize the TSO handler, and prepare the first payload */
703 tso_start(skb, &tso);
705 total_len = skb->len - hdr_len;
706 while (total_len > 0) {
709 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
710 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
711 total_len -= data_left;
713 /* prepare packet headers: MAC + IP + TCP */
714 hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
715 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
716 ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
720 while (data_left > 0) {
723 size = min_t(int, tso.size, data_left);
724 bdp = fec_enet_get_nextdesc(bdp, fep);
725 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
726 ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
727 size, size == data_left,
733 tso_build_data(skb, &tso, size);
736 bdp = fec_enet_get_nextdesc(bdp, fep);
739 /* Save skb pointer */
740 fep->tx_skbuff[index] = skb;
742 skb_tx_timestamp(skb);
745 /* Trigger transmission start */
746 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
751 /* TODO: Release all used data descriptors for TSO */
756 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
758 struct fec_enet_private *fep = netdev_priv(ndev);
763 ret = fec_enet_txq_submit_tso(skb, ndev);
765 ret = fec_enet_txq_submit_skb(skb, ndev);
769 entries_free = fec_enet_get_free_txdesc_num(fep);
770 if (entries_free <= fep->tx_stop_threshold)
771 netif_stop_queue(ndev);
776 /* Init RX & TX buffer descriptors
778 static void fec_enet_bd_init(struct net_device *dev)
780 struct fec_enet_private *fep = netdev_priv(dev);
784 /* Initialize the receive buffer descriptors. */
785 bdp = fep->rx_bd_base;
786 for (i = 0; i < fep->rx_ring_size; i++) {
788 /* Initialize the BD for every fragment in the page. */
789 if (bdp->cbd_bufaddr)
790 bdp->cbd_sc = BD_ENET_RX_EMPTY;
793 bdp = fec_enet_get_nextdesc(bdp, fep);
796 /* Set the last buffer to wrap */
797 bdp = fec_enet_get_prevdesc(bdp, fep);
798 bdp->cbd_sc |= BD_SC_WRAP;
800 fep->cur_rx = fep->rx_bd_base;
802 /* ...and the same for transmit */
803 bdp = fep->tx_bd_base;
805 for (i = 0; i < fep->tx_ring_size; i++) {
807 /* Initialize the BD for every fragment in the page. */
809 if (fep->tx_skbuff[i]) {
810 dev_kfree_skb_any(fep->tx_skbuff[i]);
811 fep->tx_skbuff[i] = NULL;
813 bdp->cbd_bufaddr = 0;
814 bdp = fec_enet_get_nextdesc(bdp, fep);
817 /* Set the last buffer to wrap */
818 bdp = fec_enet_get_prevdesc(bdp, fep);
819 bdp->cbd_sc |= BD_SC_WRAP;
824 * This function is called to start or restart the FEC during a link
825 * change, transmit timeout, or to reconfigure the FEC. The network
826 * packet processing for this device must be stopped before this call.
829 fec_restart(struct net_device *ndev)
831 struct fec_enet_private *fep = netdev_priv(ndev);
832 const struct platform_device_id *id_entry =
833 platform_get_device_id(fep->pdev);
837 u32 rcntl = OPT_FRAME_SIZE | 0x04;
838 u32 ecntl = 0x2; /* ETHEREN */
840 /* Whack a reset. We should wait for this. */
841 writel(1, fep->hwp + FEC_ECNTRL);
845 * enet-mac reset will reset mac address registers too,
846 * so we need to reconfigure them.
848 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
849 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
850 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
851 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
854 /* Clear any outstanding interrupt. */
855 writel(0xffc00000, fep->hwp + FEC_IEVENT);
857 /* Set maximum receive buffer size. */
858 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
860 fec_enet_bd_init(ndev);
862 /* Set receive and transmit descriptor base. */
863 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
865 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
866 * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
868 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
869 * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
872 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
873 if (fep->tx_skbuff[i]) {
874 dev_kfree_skb_any(fep->tx_skbuff[i]);
875 fep->tx_skbuff[i] = NULL;
879 /* Enable MII mode */
880 if (fep->full_duplex == DUPLEX_FULL) {
882 writel(0x04, fep->hwp + FEC_X_CNTRL);
886 writel(0x0, fep->hwp + FEC_X_CNTRL);
890 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
892 #if !defined(CONFIG_M5272)
893 /* set RX checksum */
894 val = readl(fep->hwp + FEC_RACC);
895 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
896 val |= FEC_RACC_OPTIONS;
898 val &= ~FEC_RACC_OPTIONS;
899 writel(val, fep->hwp + FEC_RACC);
903 * The phy interface and speed need to get configured
904 * differently on enet-mac.
906 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
907 /* Enable flow control and length check */
908 rcntl |= 0x40000000 | 0x00000020;
910 /* RGMII, RMII or MII */
911 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
913 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
918 /* 1G, 100M or 10M */
920 if (fep->phy_dev->speed == SPEED_1000)
922 else if (fep->phy_dev->speed == SPEED_100)
928 #ifdef FEC_MIIGSK_ENR
929 if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
931 /* disable the gasket and wait */
932 writel(0, fep->hwp + FEC_MIIGSK_ENR);
933 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
937 * configure the gasket:
938 * RMII, 50 MHz, no loopback, no echo
939 * MII, 25 MHz, no loopback, no echo
941 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
942 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
943 if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
944 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
945 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
947 /* re-enable the gasket */
948 writel(2, fep->hwp + FEC_MIIGSK_ENR);
953 #if !defined(CONFIG_M5272)
954 /* enable pause frame*/
955 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
956 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
957 fep->phy_dev && fep->phy_dev->pause)) {
958 rcntl |= FEC_ENET_FCE;
960 /* set FIFO threshold parameter to reduce overrun */
961 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
962 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
963 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
964 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
967 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
969 rcntl &= ~FEC_ENET_FCE;
971 #endif /* !defined(CONFIG_M5272) */
973 writel(rcntl, fep->hwp + FEC_R_CNTRL);
975 /* Setup multicast filter. */
976 set_multicast_list(ndev);
978 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
979 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
982 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
983 /* enable ENET endian swap */
985 /* enable ENET store and forward mode */
986 writel(1 << 8, fep->hwp + FEC_X_WMRK);
993 /* Enable the MIB statistic event counters */
994 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
997 /* And last, enable the transmit and receive processing */
998 writel(ecntl, fep->hwp + FEC_ECNTRL);
999 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1001 if (fep->bufdesc_ex)
1002 fec_ptp_start_cyclecounter(ndev);
1004 /* Enable interrupts we wish to service */
1005 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1009 fec_stop(struct net_device *ndev)
1011 struct fec_enet_private *fep = netdev_priv(ndev);
1012 const struct platform_device_id *id_entry =
1013 platform_get_device_id(fep->pdev);
1014 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1016 /* We cannot expect a graceful transmit stop without link !!! */
1018 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1020 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1021 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1024 /* Whack a reset. We should wait for this. */
1025 writel(1, fep->hwp + FEC_ECNTRL);
1027 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1028 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1030 /* We have to keep ENET enabled to have MII interrupt stay working */
1031 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1032 writel(2, fep->hwp + FEC_ECNTRL);
1033 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1039 fec_timeout(struct net_device *ndev)
1041 struct fec_enet_private *fep = netdev_priv(ndev);
1045 ndev->stats.tx_errors++;
1047 schedule_work(&fep->tx_timeout_work);
1050 static void fec_enet_timeout_work(struct work_struct *work)
1052 struct fec_enet_private *fep =
1053 container_of(work, struct fec_enet_private, tx_timeout_work);
1054 struct net_device *ndev = fep->netdev;
1057 if (netif_device_present(ndev) || netif_running(ndev)) {
1058 napi_disable(&fep->napi);
1059 netif_tx_lock_bh(ndev);
1061 netif_wake_queue(ndev);
1062 netif_tx_unlock_bh(ndev);
1063 napi_enable(&fep->napi);
1069 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1070 struct skb_shared_hwtstamps *hwtstamps)
1072 unsigned long flags;
1075 spin_lock_irqsave(&fep->tmreg_lock, flags);
1076 ns = timecounter_cyc2time(&fep->tc, ts);
1077 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1079 memset(hwtstamps, 0, sizeof(*hwtstamps));
1080 hwtstamps->hwtstamp = ns_to_ktime(ns);
1084 fec_enet_tx(struct net_device *ndev)
1086 struct fec_enet_private *fep;
1087 struct bufdesc *bdp;
1088 unsigned short status;
1089 struct sk_buff *skb;
1093 fep = netdev_priv(ndev);
1094 bdp = fep->dirty_tx;
1096 /* get next bdp of dirty_tx */
1097 bdp = fec_enet_get_nextdesc(bdp, fep);
1099 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
1101 /* current queue is empty */
1102 if (bdp == fep->cur_tx)
1105 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
1107 skb = fep->tx_skbuff[index];
1108 fep->tx_skbuff[index] = NULL;
1109 if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
1110 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1111 bdp->cbd_datlen, DMA_TO_DEVICE);
1112 bdp->cbd_bufaddr = 0;
1114 bdp = fec_enet_get_nextdesc(bdp, fep);
1118 /* Check for errors. */
1119 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1120 BD_ENET_TX_RL | BD_ENET_TX_UN |
1122 ndev->stats.tx_errors++;
1123 if (status & BD_ENET_TX_HB) /* No heartbeat */
1124 ndev->stats.tx_heartbeat_errors++;
1125 if (status & BD_ENET_TX_LC) /* Late collision */
1126 ndev->stats.tx_window_errors++;
1127 if (status & BD_ENET_TX_RL) /* Retrans limit */
1128 ndev->stats.tx_aborted_errors++;
1129 if (status & BD_ENET_TX_UN) /* Underrun */
1130 ndev->stats.tx_fifo_errors++;
1131 if (status & BD_ENET_TX_CSL) /* Carrier lost */
1132 ndev->stats.tx_carrier_errors++;
1134 ndev->stats.tx_packets++;
1135 ndev->stats.tx_bytes += skb->len;
1138 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
1140 struct skb_shared_hwtstamps shhwtstamps;
1141 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1143 fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
1144 skb_tstamp_tx(skb, &shhwtstamps);
1147 /* Deferred means some collisions occurred during transmit,
1148 * but we eventually sent the packet OK.
1150 if (status & BD_ENET_TX_DEF)
1151 ndev->stats.collisions++;
1153 /* Free the sk buffer associated with this last transmit */
1154 dev_kfree_skb_any(skb);
1156 fep->dirty_tx = bdp;
1158 /* Update pointer to next buffer descriptor to be transmitted */
1159 bdp = fec_enet_get_nextdesc(bdp, fep);
1161 /* Since we have freed up a buffer, the ring is no longer full
1163 if (netif_queue_stopped(ndev)) {
1164 entries_free = fec_enet_get_free_txdesc_num(fep);
1165 if (entries_free >= fep->tx_wake_threshold)
1166 netif_wake_queue(ndev);
1170 /* ERR006538: Keep the transmitter going */
1171 if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
1172 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
1175 /* During a receive, the cur_rx points to the current incoming buffer.
1176 * When we update through the ring, if the next incoming buffer has
1177 * not been given to the system, we just set the empty indicator,
1178 * effectively tossing the packet.
1181 fec_enet_rx(struct net_device *ndev, int budget)
1183 struct fec_enet_private *fep = netdev_priv(ndev);
1184 const struct platform_device_id *id_entry =
1185 platform_get_device_id(fep->pdev);
1186 struct bufdesc *bdp;
1187 unsigned short status;
1188 struct sk_buff *skb;
1191 int pkt_received = 0;
1192 struct bufdesc_ex *ebdp = NULL;
1193 bool vlan_packet_rcvd = false;
1201 /* First, grab all of the stats for the incoming packet.
1202 * These get messed up if we get called due to a busy condition.
1206 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
1208 if (pkt_received >= budget)
1212 /* Since we have allocated space to hold a complete frame,
1213 * the last indicator should be set.
1215 if ((status & BD_ENET_RX_LAST) == 0)
1216 netdev_err(ndev, "rcv is not +last\n");
1218 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
1220 /* Check for errors. */
1221 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1222 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
1223 ndev->stats.rx_errors++;
1224 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
1225 /* Frame too long or too short. */
1226 ndev->stats.rx_length_errors++;
1228 if (status & BD_ENET_RX_NO) /* Frame alignment */
1229 ndev->stats.rx_frame_errors++;
1230 if (status & BD_ENET_RX_CR) /* CRC Error */
1231 ndev->stats.rx_crc_errors++;
1232 if (status & BD_ENET_RX_OV) /* FIFO overrun */
1233 ndev->stats.rx_fifo_errors++;
1236 /* Report late collisions as a frame error.
1237 * On this error, the BD is closed, but we don't know what we
1238 * have in the buffer. So, just drop this frame on the floor.
1240 if (status & BD_ENET_RX_CL) {
1241 ndev->stats.rx_errors++;
1242 ndev->stats.rx_frame_errors++;
1243 goto rx_processing_done;
1246 /* Process the incoming frame. */
1247 ndev->stats.rx_packets++;
1248 pkt_len = bdp->cbd_datlen;
1249 ndev->stats.rx_bytes += pkt_len;
1251 index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
1252 data = fep->rx_skbuff[index]->data;
1253 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1254 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1256 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
1257 swap_buffer(data, pkt_len);
1259 /* Extract the enhanced buffer descriptor */
1261 if (fep->bufdesc_ex)
1262 ebdp = (struct bufdesc_ex *)bdp;
1264 /* If this is a VLAN packet remove the VLAN Tag */
1265 vlan_packet_rcvd = false;
1266 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1267 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
1268 /* Push and remove the vlan tag */
1269 struct vlan_hdr *vlan_header =
1270 (struct vlan_hdr *) (data + ETH_HLEN);
1271 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1272 pkt_len -= VLAN_HLEN;
1274 vlan_packet_rcvd = true;
1277 /* This does 16 byte alignment, exactly what we need.
1278 * The packet length includes FCS, but we don't want to
1279 * include that when passing upstream as it messes up
1280 * bridging applications.
1282 skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
1284 if (unlikely(!skb)) {
1285 ndev->stats.rx_dropped++;
1287 int payload_offset = (2 * ETH_ALEN);
1288 skb_reserve(skb, NET_IP_ALIGN);
1289 skb_put(skb, pkt_len - 4); /* Make room */
1291 /* Extract the frame data without the VLAN header. */
1292 skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
1293 if (vlan_packet_rcvd)
1294 payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
1295 skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
1296 data + payload_offset,
1297 pkt_len - 4 - (2 * ETH_ALEN));
1299 skb->protocol = eth_type_trans(skb, ndev);
1301 /* Get receive timestamp from the skb */
1302 if (fep->hwts_rx_en && fep->bufdesc_ex)
1303 fec_enet_hwtstamp(fep, ebdp->ts,
1304 skb_hwtstamps(skb));
1306 if (fep->bufdesc_ex &&
1307 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1308 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
1309 /* don't check it */
1310 skb->ip_summed = CHECKSUM_UNNECESSARY;
1312 skb_checksum_none_assert(skb);
1316 /* Handle received VLAN packets */
1317 if (vlan_packet_rcvd)
1318 __vlan_hwaccel_put_tag(skb,
1322 napi_gro_receive(&fep->napi, skb);
1325 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1326 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1328 /* Clear the status flags for this buffer */
1329 status &= ~BD_ENET_RX_STATS;
1331 /* Mark the buffer empty */
1332 status |= BD_ENET_RX_EMPTY;
1333 bdp->cbd_sc = status;
1335 if (fep->bufdesc_ex) {
1336 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1338 ebdp->cbd_esc = BD_ENET_RX_INT;
1343 /* Update BD pointer to next entry */
1344 bdp = fec_enet_get_nextdesc(bdp, fep);
1346 /* Doing this here will keep the FEC running while we process
1347 * incoming frames. On a heavily loaded network, we should be
1348 * able to keep up at the expense of system resources.
1350 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1354 return pkt_received;
1358 fec_enet_interrupt(int irq, void *dev_id)
1360 struct net_device *ndev = dev_id;
1361 struct fec_enet_private *fep = netdev_priv(ndev);
1362 const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
1364 irqreturn_t ret = IRQ_NONE;
1366 int_events = readl(fep->hwp + FEC_IEVENT);
1367 writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
1369 if (int_events & napi_mask) {
1372 /* Disable the NAPI interrupts */
1373 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1374 napi_schedule(&fep->napi);
1377 if (int_events & FEC_ENET_MII) {
1379 complete(&fep->mdio_done);
1385 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1387 struct net_device *ndev = napi->dev;
1388 struct fec_enet_private *fep = netdev_priv(ndev);
1392 * Clear any pending transmit or receive interrupts before
1393 * processing the rings to avoid racing with the hardware.
1395 writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
1397 pkts = fec_enet_rx(ndev, budget);
1401 if (pkts < budget) {
1402 napi_complete(napi);
1403 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1408 /* ------------------------------------------------------------------------- */
1409 static void fec_get_mac(struct net_device *ndev)
1411 struct fec_enet_private *fep = netdev_priv(ndev);
1412 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1413 unsigned char *iap, tmpaddr[ETH_ALEN];
1416 * try to get the MAC address in the following order:
1418 * 1) module parameter via kernel command line in form
1419 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1424 * 2) from device tree data
1426 if (!is_valid_ether_addr(iap)) {
1427 struct device_node *np = fep->pdev->dev.of_node;
1429 const char *mac = of_get_mac_address(np);
1431 iap = (unsigned char *) mac;
1436 * 3) from flash or fuse (via platform data)
1438 if (!is_valid_ether_addr(iap)) {
1441 iap = (unsigned char *)FEC_FLASHMAC;
1444 iap = (unsigned char *)&pdata->mac;
1449 * 4) FEC mac registers set by bootloader
1451 if (!is_valid_ether_addr(iap)) {
1452 *((__be32 *) &tmpaddr[0]) =
1453 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1454 *((__be16 *) &tmpaddr[4]) =
1455 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1460 * 5) random mac address
1462 if (!is_valid_ether_addr(iap)) {
1463 /* Report it and use a random ethernet address instead */
1464 netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
1465 eth_hw_addr_random(ndev);
1466 netdev_info(ndev, "Using random MAC address: %pM\n",
1471 memcpy(ndev->dev_addr, iap, ETH_ALEN);
1473 /* Adjust MAC if using macaddr */
1475 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
1478 /* ------------------------------------------------------------------------- */
1483 static void fec_enet_adjust_link(struct net_device *ndev)
1485 struct fec_enet_private *fep = netdev_priv(ndev);
1486 struct phy_device *phy_dev = fep->phy_dev;
1487 int status_change = 0;
1489 /* Prevent a state halted on mii error */
1490 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
1491 phy_dev->state = PHY_RESUMING;
1496 * If the netdev is down, or is going down, we're not interested
1497 * in link state events, so just mark our idea of the link as down
1498 * and ignore the event.
1500 if (!netif_running(ndev) || !netif_device_present(ndev)) {
1502 } else if (phy_dev->link) {
1504 fep->link = phy_dev->link;
1508 if (fep->full_duplex != phy_dev->duplex) {
1509 fep->full_duplex = phy_dev->duplex;
1513 if (phy_dev->speed != fep->speed) {
1514 fep->speed = phy_dev->speed;
1518 /* if any of the above changed restart the FEC */
1519 if (status_change) {
1520 napi_disable(&fep->napi);
1521 netif_tx_lock_bh(ndev);
1523 netif_wake_queue(ndev);
1524 netif_tx_unlock_bh(ndev);
1525 napi_enable(&fep->napi);
1529 napi_disable(&fep->napi);
1530 netif_tx_lock_bh(ndev);
1532 netif_tx_unlock_bh(ndev);
1533 napi_enable(&fep->napi);
1534 fep->link = phy_dev->link;
1540 phy_print_status(phy_dev);
1543 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1545 struct fec_enet_private *fep = bus->priv;
1546 unsigned long time_left;
1548 fep->mii_timeout = 0;
1549 init_completion(&fep->mdio_done);
1551 /* start a read op */
1552 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1553 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1554 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1556 /* wait for end of transfer */
1557 time_left = wait_for_completion_timeout(&fep->mdio_done,
1558 usecs_to_jiffies(FEC_MII_TIMEOUT));
1559 if (time_left == 0) {
1560 fep->mii_timeout = 1;
1561 netdev_err(fep->netdev, "MDIO read timeout\n");
1566 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1569 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1572 struct fec_enet_private *fep = bus->priv;
1573 unsigned long time_left;
1575 fep->mii_timeout = 0;
1576 init_completion(&fep->mdio_done);
1578 /* start a write op */
1579 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1580 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1581 FEC_MMFR_TA | FEC_MMFR_DATA(value),
1582 fep->hwp + FEC_MII_DATA);
1584 /* wait for end of transfer */
1585 time_left = wait_for_completion_timeout(&fep->mdio_done,
1586 usecs_to_jiffies(FEC_MII_TIMEOUT));
1587 if (time_left == 0) {
1588 fep->mii_timeout = 1;
1589 netdev_err(fep->netdev, "MDIO write timeout\n");
1596 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1598 struct fec_enet_private *fep = netdev_priv(ndev);
1602 ret = clk_prepare_enable(fep->clk_ahb);
1605 ret = clk_prepare_enable(fep->clk_ipg);
1607 goto failed_clk_ipg;
1608 if (fep->clk_enet_out) {
1609 ret = clk_prepare_enable(fep->clk_enet_out);
1611 goto failed_clk_enet_out;
1614 ret = clk_prepare_enable(fep->clk_ptp);
1616 goto failed_clk_ptp;
1619 clk_disable_unprepare(fep->clk_ahb);
1620 clk_disable_unprepare(fep->clk_ipg);
1621 if (fep->clk_enet_out)
1622 clk_disable_unprepare(fep->clk_enet_out);
1624 clk_disable_unprepare(fep->clk_ptp);
1629 if (fep->clk_enet_out)
1630 clk_disable_unprepare(fep->clk_enet_out);
1631 failed_clk_enet_out:
1632 clk_disable_unprepare(fep->clk_ipg);
1634 clk_disable_unprepare(fep->clk_ahb);
1639 static int fec_enet_mii_probe(struct net_device *ndev)
1641 struct fec_enet_private *fep = netdev_priv(ndev);
1642 const struct platform_device_id *id_entry =
1643 platform_get_device_id(fep->pdev);
1644 struct phy_device *phy_dev = NULL;
1645 char mdio_bus_id[MII_BUS_ID_SIZE];
1646 char phy_name[MII_BUS_ID_SIZE + 3];
1648 int dev_id = fep->dev_id;
1650 fep->phy_dev = NULL;
1652 if (fep->phy_node) {
1653 phy_dev = of_phy_connect(ndev, fep->phy_node,
1654 &fec_enet_adjust_link, 0,
1655 fep->phy_interface);
1657 /* check for attached phy */
1658 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1659 if ((fep->mii_bus->phy_mask & (1 << phy_id)))
1661 if (fep->mii_bus->phy_map[phy_id] == NULL)
1663 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
1667 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
1671 if (phy_id >= PHY_MAX_ADDR) {
1672 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1673 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1677 snprintf(phy_name, sizeof(phy_name),
1678 PHY_ID_FMT, mdio_bus_id, phy_id);
1679 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1680 fep->phy_interface);
1683 if (IS_ERR(phy_dev)) {
1684 netdev_err(ndev, "could not attach to PHY\n");
1685 return PTR_ERR(phy_dev);
1688 /* mask with MAC supported features */
1689 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
1690 phy_dev->supported &= PHY_GBIT_FEATURES;
1691 phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
1692 #if !defined(CONFIG_M5272)
1693 phy_dev->supported |= SUPPORTED_Pause;
1697 phy_dev->supported &= PHY_BASIC_FEATURES;
1699 phy_dev->advertising = phy_dev->supported;
1701 fep->phy_dev = phy_dev;
1703 fep->full_duplex = 0;
1705 netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1706 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1712 static int fec_enet_mii_init(struct platform_device *pdev)
1714 static struct mii_bus *fec0_mii_bus;
1715 struct net_device *ndev = platform_get_drvdata(pdev);
1716 struct fec_enet_private *fep = netdev_priv(ndev);
1717 const struct platform_device_id *id_entry =
1718 platform_get_device_id(fep->pdev);
1719 struct device_node *node;
1720 int err = -ENXIO, i;
1723 * The dual fec interfaces are not equivalent with enet-mac.
1724 * Here are the differences:
1726 * - fec0 supports MII & RMII modes while fec1 only supports RMII
1727 * - fec0 acts as the 1588 time master while fec1 is slave
1728 * - external phys can only be configured by fec0
1730 * That is to say fec1 can not work independently. It only works
1731 * when fec0 is working. The reason behind this design is that the
1732 * second interface is added primarily for Switch mode.
1734 * Because of the last point above, both phys are attached on fec0
1735 * mdio interface in board design, and need to be configured by fec0 mii_bus.
1738 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1739 /* fec1 uses fec0 mii_bus */
1740 if (mii_cnt && fec0_mii_bus) {
1741 fep->mii_bus = fec0_mii_bus;
1748 fep->mii_timeout = 0;
1751 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
1753 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
1754 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
1755 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1758 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1759 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1761 fep->phy_speed <<= 1;
1762 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
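/* Illustrative example (assuming a 66 MHz ipg clock, not taken from this
 * code): an MII_SPEED value of 13 gives MDC = 66 MHz / ((13 + 1) * 2), about
 * 2.36 MHz, just under the 2.5 MHz target described above.
 */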
1764 fep->mii_bus = mdiobus_alloc();
1765 if (fep->mii_bus == NULL) {
1770 fep->mii_bus->name = "fec_enet_mii_bus";
1771 fep->mii_bus->read = fec_enet_mdio_read;
1772 fep->mii_bus->write = fec_enet_mdio_write;
1773 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1774 pdev->name, fep->dev_id + 1);
1775 fep->mii_bus->priv = fep;
1776 fep->mii_bus->parent = &pdev->dev;
1778 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1779 if (!fep->mii_bus->irq) {
1781 goto err_out_free_mdiobus;
1784 for (i = 0; i < PHY_MAX_ADDR; i++)
1785 fep->mii_bus->irq[i] = PHY_POLL;
1787 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
1789 err = of_mdiobus_register(fep->mii_bus, node);
1792 err = mdiobus_register(fep->mii_bus);
1796 goto err_out_free_mdio_irq;
1800 /* save fec0 mii_bus */
1801 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1802 fec0_mii_bus = fep->mii_bus;
1806 err_out_free_mdio_irq:
1807 kfree(fep->mii_bus->irq);
1808 err_out_free_mdiobus:
1809 mdiobus_free(fep->mii_bus);
1814 static void fec_enet_mii_remove(struct fec_enet_private *fep)
1816 if (--mii_cnt == 0) {
1817 mdiobus_unregister(fep->mii_bus);
1818 kfree(fep->mii_bus->irq);
1819 mdiobus_free(fep->mii_bus);
1823 static int fec_enet_get_settings(struct net_device *ndev,
1824 struct ethtool_cmd *cmd)
1826 struct fec_enet_private *fep = netdev_priv(ndev);
1827 struct phy_device *phydev = fep->phy_dev;
1832 return phy_ethtool_gset(phydev, cmd);
1835 static int fec_enet_set_settings(struct net_device *ndev,
1836 struct ethtool_cmd *cmd)
1838 struct fec_enet_private *fep = netdev_priv(ndev);
1839 struct phy_device *phydev = fep->phy_dev;
1844 return phy_ethtool_sset(phydev, cmd);
1847 static void fec_enet_get_drvinfo(struct net_device *ndev,
1848 struct ethtool_drvinfo *info)
1850 struct fec_enet_private *fep = netdev_priv(ndev);
1852 strlcpy(info->driver, fep->pdev->dev.driver->name,
1853 sizeof(info->driver));
1854 strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
1855 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
1858 static int fec_enet_get_ts_info(struct net_device *ndev,
1859 struct ethtool_ts_info *info)
1861 struct fec_enet_private *fep = netdev_priv(ndev);
1863 if (fep->bufdesc_ex) {
1865 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1866 SOF_TIMESTAMPING_RX_SOFTWARE |
1867 SOF_TIMESTAMPING_SOFTWARE |
1868 SOF_TIMESTAMPING_TX_HARDWARE |
1869 SOF_TIMESTAMPING_RX_HARDWARE |
1870 SOF_TIMESTAMPING_RAW_HARDWARE;
1872 info->phc_index = ptp_clock_index(fep->ptp_clock);
1874 info->phc_index = -1;
1876 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1877 (1 << HWTSTAMP_TX_ON);
1879 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1880 (1 << HWTSTAMP_FILTER_ALL);
1883 return ethtool_op_get_ts_info(ndev, info);
1887 #if !defined(CONFIG_M5272)
1889 static void fec_enet_get_pauseparam(struct net_device *ndev,
1890 struct ethtool_pauseparam *pause)
1892 struct fec_enet_private *fep = netdev_priv(ndev);
1894 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
1895 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
1896 pause->rx_pause = pause->tx_pause;
1899 static int fec_enet_set_pauseparam(struct net_device *ndev,
1900 struct ethtool_pauseparam *pause)
1902 struct fec_enet_private *fep = netdev_priv(ndev);
1907 if (pause->tx_pause != pause->rx_pause) {
1909 "hardware only support enable/disable both tx and rx");
1913 fep->pause_flag = 0;
1915 /* tx pause must be same as rx pause */
1916 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
1917 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1919 if (pause->rx_pause || pause->autoneg) {
1920 fep->phy_dev->supported |= ADVERTISED_Pause;
1921 fep->phy_dev->advertising |= ADVERTISED_Pause;
1923 fep->phy_dev->supported &= ~ADVERTISED_Pause;
1924 fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1927 if (pause->autoneg) {
1928 if (netif_running(ndev))
1930 phy_start_aneg(fep->phy_dev);
1932 if (netif_running(ndev)) {
1933 napi_disable(&fep->napi);
1934 netif_tx_lock_bh(ndev);
1936 netif_wake_queue(ndev);
1937 netif_tx_unlock_bh(ndev);
1938 napi_enable(&fep->napi);
1944 static const struct fec_stat {
1945 char name[ETH_GSTRING_LEN];
1949 { "tx_dropped", RMON_T_DROP },
1950 { "tx_packets", RMON_T_PACKETS },
1951 { "tx_broadcast", RMON_T_BC_PKT },
1952 { "tx_multicast", RMON_T_MC_PKT },
1953 { "tx_crc_errors", RMON_T_CRC_ALIGN },
1954 { "tx_undersize", RMON_T_UNDERSIZE },
1955 { "tx_oversize", RMON_T_OVERSIZE },
1956 { "tx_fragment", RMON_T_FRAG },
1957 { "tx_jabber", RMON_T_JAB },
1958 { "tx_collision", RMON_T_COL },
1959 { "tx_64byte", RMON_T_P64 },
1960 { "tx_65to127byte", RMON_T_P65TO127 },
1961 { "tx_128to255byte", RMON_T_P128TO255 },
1962 { "tx_256to511byte", RMON_T_P256TO511 },
1963 { "tx_512to1023byte", RMON_T_P512TO1023 },
1964 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
1965 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
1966 { "tx_octets", RMON_T_OCTETS },
1969 { "IEEE_tx_drop", IEEE_T_DROP },
1970 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
1971 { "IEEE_tx_1col", IEEE_T_1COL },
1972 { "IEEE_tx_mcol", IEEE_T_MCOL },
1973 { "IEEE_tx_def", IEEE_T_DEF },
1974 { "IEEE_tx_lcol", IEEE_T_LCOL },
1975 { "IEEE_tx_excol", IEEE_T_EXCOL },
1976 { "IEEE_tx_macerr", IEEE_T_MACERR },
1977 { "IEEE_tx_cserr", IEEE_T_CSERR },
1978 { "IEEE_tx_sqe", IEEE_T_SQE },
1979 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
1980 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
1983 { "rx_packets", RMON_R_PACKETS },
1984 { "rx_broadcast", RMON_R_BC_PKT },
1985 { "rx_multicast", RMON_R_MC_PKT },
1986 { "rx_crc_errors", RMON_R_CRC_ALIGN },
1987 { "rx_undersize", RMON_R_UNDERSIZE },
1988 { "rx_oversize", RMON_R_OVERSIZE },
1989 { "rx_fragment", RMON_R_FRAG },
1990 { "rx_jabber", RMON_R_JAB },
1991 { "rx_64byte", RMON_R_P64 },
1992 { "rx_65to127byte", RMON_R_P65TO127 },
1993 { "rx_128to255byte", RMON_R_P128TO255 },
1994 { "rx_256to511byte", RMON_R_P256TO511 },
1995 { "rx_512to1023byte", RMON_R_P512TO1023 },
1996 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
1997 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
1998 { "rx_octets", RMON_R_OCTETS },
2001 { "IEEE_rx_drop", IEEE_R_DROP },
2002 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2003 { "IEEE_rx_crc", IEEE_R_CRC },
2004 { "IEEE_rx_align", IEEE_R_ALIGN },
2005 { "IEEE_rx_macerr", IEEE_R_MACERR },
2006 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2007 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2010 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2011 struct ethtool_stats *stats, u64 *data)
2013 struct fec_enet_private *fep = netdev_priv(dev);
2016 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2017 data[i] = readl(fep->hwp + fec_stats[i].offset);
2020 static void fec_enet_get_strings(struct net_device *netdev,
2021 u32 stringset, u8 *data)
2024 switch (stringset) {
2026 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2027 memcpy(data + i * ETH_GSTRING_LEN,
2028 fec_stats[i].name, ETH_GSTRING_LEN);
2033 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2037 return ARRAY_SIZE(fec_stats);
2042 #endif /* !defined(CONFIG_M5272) */
2044 static int fec_enet_nway_reset(struct net_device *dev)
2046 struct fec_enet_private *fep = netdev_priv(dev);
2047 struct phy_device *phydev = fep->phy_dev;
2052 return genphy_restart_aneg(phydev);
2055 static const struct ethtool_ops fec_enet_ethtool_ops = {
2056 .get_settings = fec_enet_get_settings,
2057 .set_settings = fec_enet_set_settings,
2058 .get_drvinfo = fec_enet_get_drvinfo,
2059 .nway_reset = fec_enet_nway_reset,
2060 .get_link = ethtool_op_get_link,
2061 #ifndef CONFIG_M5272
2062 .get_pauseparam = fec_enet_get_pauseparam,
2063 .set_pauseparam = fec_enet_set_pauseparam,
2064 .get_strings = fec_enet_get_strings,
2065 .get_ethtool_stats = fec_enet_get_ethtool_stats,
2066 .get_sset_count = fec_enet_get_sset_count,
2068 .get_ts_info = fec_enet_get_ts_info,
2071 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2073 struct fec_enet_private *fep = netdev_priv(ndev);
2074 struct phy_device *phydev = fep->phy_dev;
2076 if (!netif_running(ndev))
2082 if (fep->bufdesc_ex) {
2083 if (cmd == SIOCSHWTSTAMP)
2084 return fec_ptp_set(ndev, rq);
2085 if (cmd == SIOCGHWTSTAMP)
2086 return fec_ptp_get(ndev, rq);
2089 return phy_mii_ioctl(phydev, rq, cmd);
2092 static void fec_enet_free_buffers(struct net_device *ndev)
2094 struct fec_enet_private *fep = netdev_priv(ndev);
2096 struct sk_buff *skb;
2097 struct bufdesc *bdp;
2099 bdp = fep->rx_bd_base;
2100 for (i = 0; i < fep->rx_ring_size; i++) {
2101 skb = fep->rx_skbuff[i];
2102 fep->rx_skbuff[i] = NULL;
2104 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
2105 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
2108 bdp = fec_enet_get_nextdesc(bdp, fep);
2111 bdp = fep->tx_bd_base;
2112 for (i = 0; i < fep->tx_ring_size; i++) {
2113 kfree(fep->tx_bounce[i]);
2114 fep->tx_bounce[i] = NULL;
2115 skb = fep->tx_skbuff[i];
2116 fep->tx_skbuff[i] = NULL;
2121 static int fec_enet_alloc_buffers(struct net_device *ndev)
2123 struct fec_enet_private *fep = netdev_priv(ndev);
2125 struct sk_buff *skb;
2126 struct bufdesc *bdp;
2128 bdp = fep->rx_bd_base;
2129 for (i = 0; i < fep->rx_ring_size; i++) {
2132 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2136 addr = dma_map_single(&fep->pdev->dev, skb->data,
2137 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
2138 if (dma_mapping_error(&fep->pdev->dev, addr)) {
2140 if (net_ratelimit())
2141 netdev_err(ndev, "Rx DMA memory map failed\n");
2145 fep->rx_skbuff[i] = skb;
2146 bdp->cbd_bufaddr = addr;
2147 bdp->cbd_sc = BD_ENET_RX_EMPTY;
2149 if (fep->bufdesc_ex) {
2150 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2151 ebdp->cbd_esc = BD_ENET_RX_INT;
2154 bdp = fec_enet_get_nextdesc(bdp, fep);
2157 /* Set the last buffer to wrap. */
2158 bdp = fec_enet_get_prevdesc(bdp, fep);
2159 bdp->cbd_sc |= BD_SC_WRAP;
2161 bdp = fep->tx_bd_base;
2162 for (i = 0; i < fep->tx_ring_size; i++) {
2163 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2164 if (!fep->tx_bounce[i])
2168 bdp->cbd_bufaddr = 0;
2170 if (fep->bufdesc_ex) {
2171 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2172 ebdp->cbd_esc = BD_ENET_TX_INT;
2175 bdp = fec_enet_get_nextdesc(bdp, fep);
2178 /* Set the last buffer to wrap. */
2179 bdp = fec_enet_get_prevdesc(bdp, fep);
2180 bdp->cbd_sc |= BD_SC_WRAP;
2185 fec_enet_free_buffers(ndev);
2190 fec_enet_open(struct net_device *ndev)
2192 struct fec_enet_private *fep = netdev_priv(ndev);
2195 pinctrl_pm_select_default_state(&fep->pdev->dev);
2196 ret = fec_enet_clk_enable(ndev, true);
2200 /* I should reset the ring buffers here, but I don't yet know
2201 * a simple way to do that.
2204 ret = fec_enet_alloc_buffers(ndev);
2208 /* Probe and connect to PHY when opening the interface */
2209 ret = fec_enet_mii_probe(ndev);
2211 fec_enet_free_buffers(ndev);
2216 napi_enable(&fep->napi);
2217 phy_start(fep->phy_dev);
2218 netif_start_queue(ndev);
2223 fec_enet_close(struct net_device *ndev)
2225 struct fec_enet_private *fep = netdev_priv(ndev);
2227 phy_stop(fep->phy_dev);
2229 if (netif_device_present(ndev)) {
2230 napi_disable(&fep->napi);
2231 netif_tx_disable(ndev);
2235 phy_disconnect(fep->phy_dev);
2236 fep->phy_dev = NULL;
2238 fec_enet_clk_enable(ndev, false);
2239 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2240 fec_enet_free_buffers(ndev);
2245 /* Set or clear the multicast filter for this adaptor.
2246 * Skeleton taken from sunlance driver.
2247 * The CPM Ethernet implementation allows Multicast as well as individual
2248 * MAC address filtering. Some of the drivers check to make sure it is
2249 * a group multicast address, and discard those that are not. I guess I
2250 * will do the same for now, but just remove the test if you want
2251 * individual filtering as well (do the upper net layers want or support
2252 * this kind of feature?).
2255 #define HASH_BITS 6 /* #bits in hash */
2256 #define CRC32_POLY 0xEDB88320
2258 static void set_multicast_list(struct net_device *ndev)
2260 struct fec_enet_private *fep = netdev_priv(ndev);
2261 struct netdev_hw_addr *ha;
2262 unsigned int i, bit, data, crc, tmp;
2265 if (ndev->flags & IFF_PROMISC) {
2266 tmp = readl(fep->hwp + FEC_R_CNTRL);
2268 writel(tmp, fep->hwp + FEC_R_CNTRL);
2272 tmp = readl(fep->hwp + FEC_R_CNTRL);
2274 writel(tmp, fep->hwp + FEC_R_CNTRL);
2276 if (ndev->flags & IFF_ALLMULTI) {
2277 /* Catch all multicast addresses, so set the filter to all 1's */
2280 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2281 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2286 /* Clear the filter and add the addresses to the hash registers */
2288 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2289 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2291 netdev_for_each_mc_addr(ha, ndev) {
2292 /* calculate crc32 value of mac address */
2295 for (i = 0; i < ndev->addr_len; i++) {
2297 for (bit = 0; bit < 8; bit++, data >>= 1) {
2299 (((crc ^ data) & 1) ? CRC32_POLY : 0);
2303 /* only the upper 6 bits (HASH_BITS) are used,
2304 * which point to a specific bit in the hash registers */
2306 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
2309 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2310 tmp |= 1 << (hash - 32);
2311 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2313 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2315 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2320 /* Program a new MAC address into the hardware. */
2322 fec_set_mac_address(struct net_device *ndev, void *p)
2324 struct fec_enet_private *fep = netdev_priv(ndev);
2325 struct sockaddr *addr = p;
2328 if (!is_valid_ether_addr(addr->sa_data))
2329 return -EADDRNOTAVAIL;
2330 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
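/* The six address bytes are packed into two registers: dev_addr[0..3]
 * fill ADDR_LOW with byte 0 in the most significant position, and
 * dev_addr[4..5] fill the top 16 bits of ADDR_HIGH.
 */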
2333 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
2334 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
2335 fep->hwp + FEC_ADDR_LOW);
2336 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
2337 fep->hwp + FEC_ADDR_HIGH);
2341 #ifdef CONFIG_NET_POLL_CONTROLLER
2343 * fec_poll_controller - FEC Poll controller function
2344 * @dev: The FEC network adapter
2346 * Polled functionality used by netconsole and others in non-interrupt mode
2349 static void fec_poll_controller(struct net_device *dev)
2352 struct fec_enet_private *fep = netdev_priv(dev);
2354 for (i = 0; i < FEC_IRQ_NUM; i++) {
2355 if (fep->irq[i] > 0) {
2356 disable_irq(fep->irq[i]);
2357 fec_enet_interrupt(fep->irq[i], dev);
2358 enable_irq(fep->irq[i]);
2364 #define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
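/* Features that cannot be toggled while the datapath is live; changing
 * them is bracketed by stopping NAPI/TX and restarting the MAC, as done
 * in fec_set_features() below.
 */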
2366 static int fec_set_features(struct net_device *netdev,
2367 netdev_features_t features)
2369 struct fec_enet_private *fep = netdev_priv(netdev);
2370 netdev_features_t changed = features ^ netdev->features;
2372 /* Quiesce the device if necessary */
2373 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2374 napi_disable(&fep->napi);
2375 netif_tx_lock_bh(netdev);
2379 netdev->features = features;
2381 /* Receive checksum has been changed */
2382 if (changed & NETIF_F_RXCSUM) {
2383 if (features & NETIF_F_RXCSUM)
2384 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2386 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
2389 /* Resume the device after updates */
2390 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2391 fec_restart(netdev);
2392 netif_wake_queue(netdev);
2393 netif_tx_unlock_bh(netdev);
2394 napi_enable(&fep->napi);
2400 static const struct net_device_ops fec_netdev_ops = {
2401 .ndo_open = fec_enet_open,
2402 .ndo_stop = fec_enet_close,
2403 .ndo_start_xmit = fec_enet_start_xmit,
2404 .ndo_set_rx_mode = set_multicast_list,
2405 .ndo_change_mtu = eth_change_mtu,
2406 .ndo_validate_addr = eth_validate_addr,
2407 .ndo_tx_timeout = fec_timeout,
2408 .ndo_set_mac_address = fec_set_mac_address,
2409 .ndo_do_ioctl = fec_enet_ioctl,
2410 #ifdef CONFIG_NET_POLL_CONTROLLER
2411 .ndo_poll_controller = fec_poll_controller,
2413 .ndo_set_features = fec_set_features,
2417 * XXX: We need to clean up on failure exits here.
2420 static int fec_enet_init(struct net_device *ndev)
2422 struct fec_enet_private *fep = netdev_priv(ndev);
2423 const struct platform_device_id *id_entry =
2424 platform_get_device_id(fep->pdev);
2425 struct bufdesc *cbd_base;
2428 /* init the tx & rx ring size */
2429 fep->tx_ring_size = TX_RING_SIZE;
2430 fep->rx_ring_size = RX_RING_SIZE;
2432 fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2433 fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
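/* tx_stop_threshold reserves enough free descriptors for a worst-case
 * (maximally fragmented TSO) frame; tx_wake_threshold adds hysteresis
 * before the transmit queue is restarted.
 */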
2435 if (fep->bufdesc_ex)
2436 fep->bufdesc_size = sizeof(struct bufdesc_ex);
2438 fep->bufdesc_size = sizeof(struct bufdesc);
2439 bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
2442 /* Allocate memory for buffer descriptors. */
2443 cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
2448 fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
2449 &fep->tso_hdrs_dma, GFP_KERNEL);
2450 if (!fep->tso_hdrs) {
2451 dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
2455 memset(cbd_base, 0, PAGE_SIZE);
2459 /* Get the Ethernet address */
2461 /* make sure MAC we just acquired is programmed into the hw */
2462 fec_set_mac_address(ndev, NULL);
2464 /* Set receive and transmit descriptor base. */
2465 fep->rx_bd_base = cbd_base;
2466 if (fep->bufdesc_ex)
2467 fep->tx_bd_base = (struct bufdesc *)
2468 (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
2470 fep->tx_bd_base = cbd_base + fep->rx_ring_size;
2472 /* The FEC Ethernet specific entries in the device structure */
2473 ndev->watchdog_timeo = TX_TIMEOUT;
2474 ndev->netdev_ops = &fec_netdev_ops;
2475 ndev->ethtool_ops = &fec_enet_ethtool_ops;
2477 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2478 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2480 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
2481 /* enable hw VLAN support */
2482 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2484 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2485 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
2487 /* enable hw checksum and TSO acceleration */
2488 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2489 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
2490 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2493 ndev->hw_features = ndev->features;
2501 static void fec_reset_phy(struct platform_device *pdev)
2505 struct device_node *np = pdev->dev.of_node;
2510 of_property_read_u32(np, "phy-reset-duration", &msec);
2511 /* A sane reset duration should not be longer than 1s */
2515 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2516 if (!gpio_is_valid(phy_reset))
2519 err = devm_gpio_request_one(&pdev->dev, phy_reset,
2520 GPIOF_OUT_INIT_LOW, "phy-reset");
2522 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
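/* The reset GPIO was requested driven low (reset asserted, assuming an
 * active-low reset line); raising it here releases the PHY from reset.
 */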
2526 gpio_set_value(phy_reset, 1);
2528 #else /* CONFIG_OF */
2529 static void fec_reset_phy(struct platform_device *pdev)
2532 * In case of platform probe, the reset has been done by machine code.
2536 #endif /* CONFIG_OF */
2539 fec_probe(struct platform_device *pdev)
2541 struct fec_enet_private *fep;
2542 struct fec_platform_data *pdata;
2543 struct net_device *ndev;
2544 int i, irq, ret = 0;
2546 const struct of_device_id *of_id;
2548 struct device_node *np = pdev->dev.of_node, *phy_node;
2550 of_id = of_match_device(fec_dt_ids, &pdev->dev);
2552 pdev->id_entry = of_id->data;
2554 /* Init network device */
2555 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
2559 SET_NETDEV_DEV(ndev, &pdev->dev);
2561 /* setup board info structure */
2562 fep = netdev_priv(ndev);
2564 #if !defined(CONFIG_M5272)
2565 /* Enable pause frame autonegotiation by default */
2566 if (pdev->id_entry &&
2567 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
2568 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2571 /* Select default pin state */
2572 pinctrl_pm_select_default_state(&pdev->dev);
2574 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2575 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2576 if (IS_ERR(fep->hwp)) {
2577 ret = PTR_ERR(fep->hwp);
2578 goto failed_ioremap;
2582 fep->dev_id = dev_id++;
2584 fep->bufdesc_ex = 0;
2586 platform_set_drvdata(pdev, ndev);
2588 phy_node = of_parse_phandle(np, "phy-handle", 0);
2589 if (!phy_node && of_phy_is_fixed_link(np)) {
2590 ret = of_phy_register_fixed_link(np);
2593 "broken fixed-link specification\n");
2596 phy_node = of_node_get(np);
2598 fep->phy_node = phy_node;
2600 ret = of_get_phy_mode(pdev->dev.of_node);
2602 pdata = dev_get_platdata(&pdev->dev);
2604 fep->phy_interface = pdata->phy;
2606 fep->phy_interface = PHY_INTERFACE_MODE_MII;
2608 fep->phy_interface = ret;
2611 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2612 if (IS_ERR(fep->clk_ipg)) {
2613 ret = PTR_ERR(fep->clk_ipg);
2617 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2618 if (IS_ERR(fep->clk_ahb)) {
2619 ret = PTR_ERR(fep->clk_ahb);
2623 /* enet_out is optional, depends on board */
2624 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
2625 if (IS_ERR(fep->clk_enet_out))
2626 fep->clk_enet_out = NULL;
2628 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
2630 pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
2631 if (IS_ERR(fep->clk_ptp)) {
2632 fep->clk_ptp = NULL;
2633 fep->bufdesc_ex = 0;
2636 ret = fec_enet_clk_enable(ndev, true);
2640 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2641 if (!IS_ERR(fep->reg_phy)) {
2642 ret = regulator_enable(fep->reg_phy);
2645 "Failed to enable phy regulator: %d\n", ret);
2646 goto failed_regulator;
2649 fep->reg_phy = NULL;
2652 fec_reset_phy(pdev);
2654 if (fep->bufdesc_ex)
2657 ret = fec_enet_init(ndev);
2661 for (i = 0; i < FEC_IRQ_NUM; i++) {
2662 irq = platform_get_irq(pdev, i);
2669 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
2670 0, pdev->name, ndev);
2675 ret = fec_enet_mii_init(pdev);
2677 goto failed_mii_init;
2679 /* Carrier starts down, phylib will bring it up */
2680 netif_carrier_off(ndev);
2681 fec_enet_clk_enable(ndev, false);
2682 pinctrl_pm_select_sleep_state(&pdev->dev);
2684 ret = register_netdev(ndev);
2686 goto failed_register;
2688 if (fep->bufdesc_ex && fep->ptp_clock)
2689 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
2691 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
2695 fec_enet_mii_remove(fep);
2700 regulator_disable(fep->reg_phy);
2702 fec_enet_clk_enable(ndev, false);
2705 of_node_put(phy_node);
2713 fec_drv_remove(struct platform_device *pdev)
2715 struct net_device *ndev = platform_get_drvdata(pdev);
2716 struct fec_enet_private *fep = netdev_priv(ndev);
2718 cancel_work_sync(&fep->tx_timeout_work);
2719 unregister_netdev(ndev);
2720 fec_enet_mii_remove(fep);
2721 del_timer_sync(&fep->time_keep);
2723 regulator_disable(fep->reg_phy);
2725 ptp_clock_unregister(fep->ptp_clock);
2726 fec_enet_clk_enable(ndev, false);
2727 of_node_put(fep->phy_node);
2733 static int __maybe_unused fec_suspend(struct device *dev)
2735 struct net_device *ndev = dev_get_drvdata(dev);
2736 struct fec_enet_private *fep = netdev_priv(ndev);
2739 if (netif_running(ndev)) {
2740 phy_stop(fep->phy_dev);
2741 napi_disable(&fep->napi);
2742 netif_tx_lock_bh(ndev);
2743 netif_device_detach(ndev);
2744 netif_tx_unlock_bh(ndev);
2749 fec_enet_clk_enable(ndev, false);
2750 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2753 regulator_disable(fep->reg_phy);
2758 static int __maybe_unused fec_resume(struct device *dev)
2760 struct net_device *ndev = dev_get_drvdata(dev);
2761 struct fec_enet_private *fep = netdev_priv(ndev);
2765 ret = regulator_enable(fep->reg_phy);
2770 pinctrl_pm_select_default_state(&fep->pdev->dev);
2771 ret = fec_enet_clk_enable(ndev, true);
2776 if (netif_running(ndev)) {
2778 netif_tx_lock_bh(ndev);
2779 netif_device_attach(ndev);
2780 netif_tx_unlock_bh(ndev);
2781 napi_enable(&fep->napi);
2782 phy_start(fep->phy_dev);
2790 regulator_disable(fep->reg_phy);
2794 static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
2796 static struct platform_driver fec_driver = {
2798 .name = DRIVER_NAME,
2799 .owner = THIS_MODULE,
2801 .of_match_table = fec_dt_ids,
2803 .id_table = fec_devtype,
2805 .remove = fec_drv_remove,
2808 module_platform_driver(fec_driver);
2810 MODULE_ALIAS("platform:"DRIVER_NAME);
2811 MODULE_LICENSE("GPL");