1 /* drivers/net/ethernet/freescale/gianfar.c
3 * Gianfar Ethernet Driver
4 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c
9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * Copyright 2007 MontaVista Software, Inc.
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
20 * Gianfar: AKA Lambda Draconis, "Dragon"
28 * The driver is initialized through of_device. Configuration information
29 * is therefore conveyed through an OF-style device tree.
31 * The Gianfar Ethernet Controller uses a ring of buffer
32 * descriptors. The beginning is indicated by a register
33 * pointing to the physical address of the start of the ring.
34 * The end is determined by a "wrap" bit being set in the
35 * last descriptor of the ring.
37 * When a packet is received, the RXF bit in the
38 * IEVENT register is set, triggering an interrupt when the
39 * corresponding bit in the IMASK register is also set (if
40 * interrupt coalescing is active, then the interrupt may not
41 * happen immediately, but will wait until either a set number
42 * of frames or amount of time have passed). In NAPI, the
43 * interrupt handler will signal there is work to be done, and
44 * exit. This method will start at the last known empty
45 * descriptor, and process every subsequent descriptor until there
46 * are none left with data (NAPI will stop after a set number of
47 * packets to give time to other tasks, but will eventually
48 * process all the packets). The data arrives inside a
49 * pre-allocated skb, and so after the skb is passed up to the
50 * stack, a new skb must be allocated, and the address field in
51 * the buffer descriptor must be updated to indicate this new skb.
54 * When the kernel requests that a packet be transmitted, the
55 * driver starts where it left off last time, and points the
56 * descriptor at the buffer which was passed in. The driver
57 * then informs the DMA engine that there are packets ready to
58 * be transmitted. Once the controller is finished transmitting
59 * the packet, an interrupt may be triggered (under the same
60 * conditions as for reception, but depending on the TXF bit).
61 * The driver then cleans up the buffer. */
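/* Illustrative sketch (not part of the driver): the descriptor ring walk
 * described above, using a simplified, stand-alone layout. The names
 * demo_rxbd, DEMO_EMPTY and DEMO_WRAP are made up for this example; the
 * real descriptors are struct rxbd8/txbd8 from gianfar.h.
 */
#if 0
#define DEMO_EMPTY	0x8000	/* descriptor still owned by hardware */
#define DEMO_WRAP	0x2000	/* last descriptor of the ring */

struct demo_rxbd {
	unsigned short status;
	unsigned short length;
	unsigned int bufptr;
};

/* Process every filled descriptor, starting at @cur, until one that is
 * still empty (owned by hardware) is found; the wrap bit sends us back
 * to the base of the ring instead of running past its end.
 */
static void demo_clean_ring(struct demo_rxbd *base, struct demo_rxbd *cur)
{
	while (!(cur->status & DEMO_EMPTY)) {
		/* pass the buffer up the stack, refill it, ... */
		cur->status |= DEMO_EMPTY;	/* hand it back to hardware */
		cur = (cur->status & DEMO_WRAP) ? base : cur + 1;
	}
}
#endif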
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/delay.h>
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_vlan.h>
78 #include <linux/spinlock.h>
80 #include <linux/of_address.h>
81 #include <linux/of_irq.h>
82 #include <linux/of_mdio.h>
83 #include <linux/of_platform.h>
85 #include <linux/tcp.h>
86 #include <linux/udp.h>
88 #include <linux/net_tstamp.h>
93 #include <asm/mpc85xx.h>
96 #include <linux/uaccess.h>
97 #include <linux/module.h>
98 #include <linux/dma-mapping.h>
99 #include <linux/crc32.h>
100 #include <linux/mii.h>
101 #include <linux/phy.h>
102 #include <linux/phy_fixed.h>
103 #include <linux/of.h>
104 #include <linux/of_net.h>
108 #define TX_TIMEOUT (5*HZ)
110 const char gfar_driver_version[] = "2.0";
112 static int gfar_enet_open(struct net_device *dev);
113 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
114 static void gfar_reset_task(struct work_struct *work);
115 static void gfar_timeout(struct net_device *dev);
116 static int gfar_close(struct net_device *dev);
117 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
119 static int gfar_set_mac_address(struct net_device *dev);
120 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
121 static irqreturn_t gfar_error(int irq, void *dev_id);
122 static irqreturn_t gfar_transmit(int irq, void *dev_id);
123 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
124 static void adjust_link(struct net_device *dev);
125 static noinline void gfar_update_link_state(struct gfar_private *priv);
126 static int init_phy(struct net_device *dev);
127 static int gfar_probe(struct platform_device *ofdev);
128 static int gfar_remove(struct platform_device *ofdev);
129 static void free_skb_resources(struct gfar_private *priv);
130 static void gfar_set_multi(struct net_device *dev);
131 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
132 static void gfar_configure_serdes(struct net_device *dev);
133 static int gfar_poll_rx(struct napi_struct *napi, int budget);
134 static int gfar_poll_tx(struct napi_struct *napi, int budget);
135 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
136 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
137 #ifdef CONFIG_NET_POLL_CONTROLLER
138 static void gfar_netpoll(struct net_device *dev);
140 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
141 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
142 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
143 static void gfar_halt_nodisable(struct gfar_private *priv);
144 static void gfar_clear_exact_match(struct net_device *dev);
145 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
147 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
149 MODULE_AUTHOR("Freescale Semiconductor, Inc");
150 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
151 MODULE_LICENSE("GPL");
153 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
158 bdp->bufPtr = cpu_to_be32(buf);
160 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
161 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
162 lstatus |= BD_LFLAG(RXBD_WRAP);
166 bdp->lstatus = cpu_to_be32(lstatus);
169 static void gfar_init_bds(struct net_device *ndev)
171 struct gfar_private *priv = netdev_priv(ndev);
172 struct gfar __iomem *regs = priv->gfargrp[0].regs;
173 struct gfar_priv_tx_q *tx_queue = NULL;
174 struct gfar_priv_rx_q *rx_queue = NULL;
179 for (i = 0; i < priv->num_tx_queues; i++) {
180 tx_queue = priv->tx_queue[i];
181 /* Initialize some variables in our dev structure */
182 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
183 tx_queue->dirty_tx = tx_queue->tx_bd_base;
184 tx_queue->cur_tx = tx_queue->tx_bd_base;
185 tx_queue->skb_curtx = 0;
186 tx_queue->skb_dirtytx = 0;
188 /* Initialize Transmit Descriptor Ring */
189 txbdp = tx_queue->tx_bd_base;
190 for (j = 0; j < tx_queue->tx_ring_size; j++) {
196 /* Set the last descriptor in the ring to indicate wrap */
198 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
202 rfbptr = &regs->rfbptr0;
203 for (i = 0; i < priv->num_rx_queues; i++) {
204 rx_queue = priv->rx_queue[i];
206 rx_queue->next_to_clean = 0;
207 rx_queue->next_to_use = 0;
208 rx_queue->next_to_alloc = 0;
210 /* make sure next_to_clean != next_to_use after this
211 * by leaving at least 1 unused descriptor
213 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
215 rx_queue->rfbptr = rfbptr;
220 static int gfar_alloc_skb_resources(struct net_device *ndev)
225 struct gfar_private *priv = netdev_priv(ndev);
226 struct device *dev = priv->dev;
227 struct gfar_priv_tx_q *tx_queue = NULL;
228 struct gfar_priv_rx_q *rx_queue = NULL;
230 priv->total_tx_ring_size = 0;
231 for (i = 0; i < priv->num_tx_queues; i++)
232 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
234 priv->total_rx_ring_size = 0;
235 for (i = 0; i < priv->num_rx_queues; i++)
236 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
238 /* Allocate memory for the buffer descriptors */
239 vaddr = dma_alloc_coherent(dev,
240 (priv->total_tx_ring_size *
241 sizeof(struct txbd8)) +
242 (priv->total_rx_ring_size *
243 sizeof(struct rxbd8)),
248 for (i = 0; i < priv->num_tx_queues; i++) {
249 tx_queue = priv->tx_queue[i];
250 tx_queue->tx_bd_base = vaddr;
251 tx_queue->tx_bd_dma_base = addr;
252 tx_queue->dev = ndev;
253 /* enet DMA only understands physical addresses */
254 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
255 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
258 /* Start the rx descriptor ring where the tx ring leaves off */
259 for (i = 0; i < priv->num_rx_queues; i++) {
260 rx_queue = priv->rx_queue[i];
261 rx_queue->rx_bd_base = vaddr;
262 rx_queue->rx_bd_dma_base = addr;
263 rx_queue->ndev = ndev;
265 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
266 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
269 /* Setup the skbuff rings */
270 for (i = 0; i < priv->num_tx_queues; i++) {
271 tx_queue = priv->tx_queue[i];
272 tx_queue->tx_skbuff =
273 kmalloc_array(tx_queue->tx_ring_size,
274 sizeof(*tx_queue->tx_skbuff),
276 if (!tx_queue->tx_skbuff)
279 for (j = 0; j < tx_queue->tx_ring_size; j++)
280 tx_queue->tx_skbuff[j] = NULL;
283 for (i = 0; i < priv->num_rx_queues; i++) {
284 rx_queue = priv->rx_queue[i];
285 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
286 sizeof(*rx_queue->rx_buff),
288 if (!rx_queue->rx_buff)
297 free_skb_resources(priv);
301 static void gfar_init_tx_rx_base(struct gfar_private *priv)
303 struct gfar __iomem *regs = priv->gfargrp[0].regs;
307 baddr = &regs->tbase0;
308 for (i = 0; i < priv->num_tx_queues; i++) {
309 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
313 baddr = &regs->rbase0;
314 for (i = 0; i < priv->num_rx_queues; i++) {
315 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
320 static void gfar_init_rqprm(struct gfar_private *priv)
322 struct gfar __iomem *regs = priv->gfargrp[0].regs;
326 baddr = &regs->rqprm0;
327 for (i = 0; i < priv->num_rx_queues; i++) {
328 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
329 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
334 static void gfar_rx_offload_en(struct gfar_private *priv)
336 /* set this when rx hw offload (TOE) functions are being used */
337 priv->uses_rxfcb = 0;
339 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
340 priv->uses_rxfcb = 1;
342 if (priv->hwts_rx_en || priv->rx_filer_enable)
343 priv->uses_rxfcb = 1;
346 static void gfar_mac_rx_config(struct gfar_private *priv)
348 struct gfar __iomem *regs = priv->gfargrp[0].regs;
351 if (priv->rx_filer_enable) {
352 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
353 /* Program the RIR0 reg with the required distribution */
354 if (priv->poll_mode == GFAR_SQ_POLLING)
355 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
356 else /* GFAR_MQ_POLLING */
357 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
360 /* Restore PROMISC mode */
361 if (priv->ndev->flags & IFF_PROMISC)
364 if (priv->ndev->features & NETIF_F_RXCSUM)
365 rctrl |= RCTRL_CHECKSUMMING;
367 if (priv->extended_hash)
368 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
371 rctrl &= ~RCTRL_PAL_MASK;
372 rctrl |= RCTRL_PADDING(priv->padding);
375 /* Enable HW time stamping if requested from user space */
376 if (priv->hwts_rx_en)
377 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
379 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
380 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
382 /* Clear the LFC bit */
383 gfar_write(&regs->rctrl, rctrl);
384 /* Init flow control threshold values */
385 gfar_init_rqprm(priv);
386 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
389 /* Init rctrl based on our settings */
390 gfar_write(&regs->rctrl, rctrl);
393 static void gfar_mac_tx_config(struct gfar_private *priv)
395 struct gfar __iomem *regs = priv->gfargrp[0].regs;
398 if (priv->ndev->features & NETIF_F_IP_CSUM)
399 tctrl |= TCTRL_INIT_CSUM;
401 if (priv->prio_sched_en)
402 tctrl |= TCTRL_TXSCHED_PRIO;
404 tctrl |= TCTRL_TXSCHED_WRRS;
405 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
406 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
409 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
410 tctrl |= TCTRL_VLINS;
412 gfar_write(&regs->tctrl, tctrl);
415 static void gfar_configure_coalescing(struct gfar_private *priv,
416 unsigned long tx_mask, unsigned long rx_mask)
418 struct gfar __iomem *regs = priv->gfargrp[0].regs;
421 if (priv->mode == MQ_MG_MODE) {
424 baddr = &regs->txic0;
425 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
426 gfar_write(baddr + i, 0);
427 if (likely(priv->tx_queue[i]->txcoalescing))
428 gfar_write(baddr + i, priv->tx_queue[i]->txic);
431 baddr = &regs->rxic0;
432 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
433 gfar_write(baddr + i, 0);
434 if (likely(priv->rx_queue[i]->rxcoalescing))
435 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
438 /* Backward compatible case -- even if we enable
439 * multiple queues, there's only a single reg to program */
441 gfar_write(&regs->txic, 0);
442 if (likely(priv->tx_queue[0]->txcoalescing))
443 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
445 gfar_write(&regs->rxic, 0);
446 if (unlikely(priv->rx_queue[0]->rxcoalescing))
447 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
451 void gfar_configure_coalescing_all(struct gfar_private *priv)
453 gfar_configure_coalescing(priv, 0xFF, 0xFF);
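/* The all-ones masks select every queue bit, so this reprograms interrupt
 * coalescing for all Tx and Rx queues at once.
 */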
456 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
458 struct gfar_private *priv = netdev_priv(dev);
459 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
460 unsigned long tx_packets = 0, tx_bytes = 0;
463 for (i = 0; i < priv->num_rx_queues; i++) {
464 rx_packets += priv->rx_queue[i]->stats.rx_packets;
465 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
466 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
469 dev->stats.rx_packets = rx_packets;
470 dev->stats.rx_bytes = rx_bytes;
471 dev->stats.rx_dropped = rx_dropped;
473 for (i = 0; i < priv->num_tx_queues; i++) {
474 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
475 tx_packets += priv->tx_queue[i]->stats.tx_packets;
478 dev->stats.tx_bytes = tx_bytes;
479 dev->stats.tx_packets = tx_packets;
484 static int gfar_set_mac_addr(struct net_device *dev, void *p)
486 eth_mac_addr(dev, p);
488 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
493 static const struct net_device_ops gfar_netdev_ops = {
494 .ndo_open = gfar_enet_open,
495 .ndo_start_xmit = gfar_start_xmit,
496 .ndo_stop = gfar_close,
497 .ndo_change_mtu = gfar_change_mtu,
498 .ndo_set_features = gfar_set_features,
499 .ndo_set_rx_mode = gfar_set_multi,
500 .ndo_tx_timeout = gfar_timeout,
501 .ndo_do_ioctl = gfar_ioctl,
502 .ndo_get_stats = gfar_get_stats,
503 .ndo_change_carrier = fixed_phy_change_carrier,
504 .ndo_set_mac_address = gfar_set_mac_addr,
505 .ndo_validate_addr = eth_validate_addr,
506 #ifdef CONFIG_NET_POLL_CONTROLLER
507 .ndo_poll_controller = gfar_netpoll,
511 static void gfar_ints_disable(struct gfar_private *priv)
514 for (i = 0; i < priv->num_grps; i++) {
515 struct gfar __iomem *regs = priv->gfargrp[i].regs;
517 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
519 /* Initialize IMASK */
520 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
524 static void gfar_ints_enable(struct gfar_private *priv)
527 for (i = 0; i < priv->num_grps; i++) {
528 struct gfar __iomem *regs = priv->gfargrp[i].regs;
529 /* Unmask the interrupts we look for */
530 gfar_write(&regs->imask, IMASK_DEFAULT);
534 static int gfar_alloc_tx_queues(struct gfar_private *priv)
538 for (i = 0; i < priv->num_tx_queues; i++) {
539 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
541 if (!priv->tx_queue[i])
544 priv->tx_queue[i]->tx_skbuff = NULL;
545 priv->tx_queue[i]->qindex = i;
546 priv->tx_queue[i]->dev = priv->ndev;
547 spin_lock_init(&(priv->tx_queue[i]->txlock));
552 static int gfar_alloc_rx_queues(struct gfar_private *priv)
556 for (i = 0; i < priv->num_rx_queues; i++) {
557 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
559 if (!priv->rx_queue[i])
562 priv->rx_queue[i]->qindex = i;
563 priv->rx_queue[i]->ndev = priv->ndev;
568 static void gfar_free_tx_queues(struct gfar_private *priv)
572 for (i = 0; i < priv->num_tx_queues; i++)
573 kfree(priv->tx_queue[i]);
576 static void gfar_free_rx_queues(struct gfar_private *priv)
580 for (i = 0; i < priv->num_rx_queues; i++)
581 kfree(priv->rx_queue[i]);
584 static void unmap_group_regs(struct gfar_private *priv)
588 for (i = 0; i < MAXGROUPS; i++)
589 if (priv->gfargrp[i].regs)
590 iounmap(priv->gfargrp[i].regs);
593 static void free_gfar_dev(struct gfar_private *priv)
597 for (i = 0; i < priv->num_grps; i++)
598 for (j = 0; j < GFAR_NUM_IRQS; j++) {
599 kfree(priv->gfargrp[i].irqinfo[j]);
600 priv->gfargrp[i].irqinfo[j] = NULL;
603 free_netdev(priv->ndev);
606 static void disable_napi(struct gfar_private *priv)
610 for (i = 0; i < priv->num_grps; i++) {
611 napi_disable(&priv->gfargrp[i].napi_rx);
612 napi_disable(&priv->gfargrp[i].napi_tx);
616 static void enable_napi(struct gfar_private *priv)
620 for (i = 0; i < priv->num_grps; i++) {
621 napi_enable(&priv->gfargrp[i].napi_rx);
622 napi_enable(&priv->gfargrp[i].napi_tx);
626 static int gfar_parse_group(struct device_node *np,
627 struct gfar_private *priv, const char *model)
629 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
632 for (i = 0; i < GFAR_NUM_IRQS; i++) {
633 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
635 if (!grp->irqinfo[i])
639 grp->regs = of_iomap(np, 0);
643 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
645 /* If we aren't the FEC we have multiple interrupts */
646 if (model && strcasecmp(model, "FEC")) {
647 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
648 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
649 if (!gfar_irq(grp, TX)->irq ||
650 !gfar_irq(grp, RX)->irq ||
651 !gfar_irq(grp, ER)->irq)
656 spin_lock_init(&grp->grplock);
657 if (priv->mode == MQ_MG_MODE) {
658 u32 rxq_mask, txq_mask;
661 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
662 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
664 ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
666 grp->rx_bit_map = rxq_mask ?
667 rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
670 ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
672 grp->tx_bit_map = txq_mask ?
673 txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
676 if (priv->poll_mode == GFAR_SQ_POLLING) {
677 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
678 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
679 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
682 grp->rx_bit_map = 0xFF;
683 grp->tx_bit_map = 0xFF;
686 /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
687 * right to left, so we need to reverse the 8 bits to get the q index. */
689 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
690 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
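/* For example, a bit map of 0x80 (MSB set, i.e. only q0 enabled) becomes
 * 0x01 after bitrev8(), so for_each_set_bit() reports bit 0 and queue 0
 * is picked up first.
 */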
692 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
693 * also assign queues to groups
695 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
697 grp->rx_queue = priv->rx_queue[i];
698 grp->num_rx_queues++;
699 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
700 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
701 priv->rx_queue[i]->grp = grp;
704 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
706 grp->tx_queue = priv->tx_queue[i];
707 grp->num_tx_queues++;
708 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
709 priv->tqueue |= (TQUEUE_EN0 >> i);
710 priv->tx_queue[i]->grp = grp;
718 static int gfar_of_group_count(struct device_node *np)
720 struct device_node *child;
723 for_each_available_child_of_node(np, child)
724 if (of_node_name_eq(child, "queue-group"))
730 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
734 const void *mac_addr;
736 struct net_device *dev = NULL;
737 struct gfar_private *priv = NULL;
738 struct device_node *np = ofdev->dev.of_node;
739 struct device_node *child = NULL;
742 unsigned int num_tx_qs, num_rx_qs;
743 unsigned short mode, poll_mode;
748 if (of_device_is_compatible(np, "fsl,etsec2")) {
750 poll_mode = GFAR_SQ_POLLING;
753 poll_mode = GFAR_SQ_POLLING;
756 if (mode == SQ_SG_MODE) {
759 } else { /* MQ_MG_MODE */
760 /* get the actual number of supported groups */
761 unsigned int num_grps = gfar_of_group_count(np);
763 if (num_grps == 0 || num_grps > MAXGROUPS) {
764 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
766 pr_err("Cannot do alloc_etherdev, aborting\n");
770 if (poll_mode == GFAR_SQ_POLLING) {
771 num_tx_qs = num_grps; /* one txq per int group */
772 num_rx_qs = num_grps; /* one rxq per int group */
773 } else { /* GFAR_MQ_POLLING */
774 u32 tx_queues, rx_queues;
777 /* parse the num of HW tx and rx queues */
778 ret = of_property_read_u32(np, "fsl,num_tx_queues",
780 num_tx_qs = ret ? 1 : tx_queues;
782 ret = of_property_read_u32(np, "fsl,num_rx_queues",
784 num_rx_qs = ret ? 1 : rx_queues;
788 if (num_tx_qs > MAX_TX_QS) {
789 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
790 num_tx_qs, MAX_TX_QS);
791 pr_err("Cannot do alloc_etherdev, aborting\n");
795 if (num_rx_qs > MAX_RX_QS) {
796 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
797 num_rx_qs, MAX_RX_QS);
798 pr_err("Cannot do alloc_etherdev, aborting\n");
802 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
807 priv = netdev_priv(dev);
811 priv->poll_mode = poll_mode;
813 priv->num_tx_queues = num_tx_qs;
814 netif_set_real_num_rx_queues(dev, num_rx_qs);
815 priv->num_rx_queues = num_rx_qs;
817 err = gfar_alloc_tx_queues(priv);
819 goto tx_alloc_failed;
821 err = gfar_alloc_rx_queues(priv);
823 goto rx_alloc_failed;
825 err = of_property_read_string(np, "model", &model);
827 pr_err("Device model property missing, aborting\n");
828 goto rx_alloc_failed;
831 /* Init Rx queue filer rule set linked list */
832 INIT_LIST_HEAD(&priv->rx_list.list);
833 priv->rx_list.count = 0;
834 mutex_init(&priv->rx_queue_access);
836 for (i = 0; i < MAXGROUPS; i++)
837 priv->gfargrp[i].regs = NULL;
839 /* Parse and initialize group specific information */
840 if (priv->mode == MQ_MG_MODE) {
841 for_each_available_child_of_node(np, child) {
842 if (!of_node_name_eq(child, "queue-group"))
845 err = gfar_parse_group(child, priv, model);
849 } else { /* SQ_SG_MODE */
850 err = gfar_parse_group(np, priv, model);
855 if (of_property_read_bool(np, "bd-stash")) {
856 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
857 priv->bd_stash_en = 1;
860 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
863 priv->rx_stash_size = stash_len;
865 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
868 priv->rx_stash_index = stash_idx;
870 if (stash_len || stash_idx)
871 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
873 mac_addr = of_get_mac_address(np);
876 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
878 if (model && !strcasecmp(model, "TSEC"))
879 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
880 FSL_GIANFAR_DEV_HAS_COALESCE |
881 FSL_GIANFAR_DEV_HAS_RMON |
882 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
884 if (model && !strcasecmp(model, "eTSEC"))
885 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
886 FSL_GIANFAR_DEV_HAS_COALESCE |
887 FSL_GIANFAR_DEV_HAS_RMON |
888 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
889 FSL_GIANFAR_DEV_HAS_CSUM |
890 FSL_GIANFAR_DEV_HAS_VLAN |
891 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
892 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
893 FSL_GIANFAR_DEV_HAS_TIMER |
894 FSL_GIANFAR_DEV_HAS_RX_FILER;
896 err = of_property_read_string(np, "phy-connection-type", &ctype);
898 /* We only care about rgmii-id. The rest are autodetected */
899 if (err == 0 && !strcmp(ctype, "rgmii-id"))
900 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
902 priv->interface = PHY_INTERFACE_MODE_MII;
904 if (of_find_property(np, "fsl,magic-packet", NULL))
905 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
907 if (of_get_property(np, "fsl,wake-on-filer", NULL))
908 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
910 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
912 /* In the case of a fixed PHY, the DT node associated
913 * to the PHY is the Ethernet MAC DT node.
915 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
916 err = of_phy_register_fixed_link(np);
920 priv->phy_node = of_node_get(np);
923 /* Find the TBI PHY. If it's not there, we don't support SGMII */
924 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
929 unmap_group_regs(priv);
931 gfar_free_rx_queues(priv);
933 gfar_free_tx_queues(priv);
938 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
940 struct hwtstamp_config config;
941 struct gfar_private *priv = netdev_priv(netdev);
943 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
946 /* reserved for future extensions */
950 switch (config.tx_type) {
951 case HWTSTAMP_TX_OFF:
952 priv->hwts_tx_en = 0;
955 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
957 priv->hwts_tx_en = 1;
963 switch (config.rx_filter) {
964 case HWTSTAMP_FILTER_NONE:
965 if (priv->hwts_rx_en) {
966 priv->hwts_rx_en = 0;
971 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
973 if (!priv->hwts_rx_en) {
974 priv->hwts_rx_en = 1;
977 config.rx_filter = HWTSTAMP_FILTER_ALL;
981 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
985 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
987 struct hwtstamp_config config;
988 struct gfar_private *priv = netdev_priv(netdev);
991 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
992 config.rx_filter = (priv->hwts_rx_en ?
993 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
995 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
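/* For reference, a minimal (hypothetical) user-space sketch of how the
 * handlers above are reached: hardware timestamping is requested with the
 * SIOCSHWTSTAMP ioctl and a struct hwtstamp_config, the same structure
 * parsed by gfar_hwtstamp_set(). Error handling is omitted; "sock" is any
 * open AF_INET/AF_PACKET socket fd.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int demo_enable_hwts(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* timestamp transmitted frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* timestamp all received frames */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif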
999 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1001 struct phy_device *phydev = dev->phydev;
1003 if (!netif_running(dev))
1006 if (cmd == SIOCSHWTSTAMP)
1007 return gfar_hwtstamp_set(dev, rq);
1008 if (cmd == SIOCGHWTSTAMP)
1009 return gfar_hwtstamp_get(dev, rq);
1014 return phy_mii_ioctl(phydev, rq, cmd);
1017 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1020 u32 rqfpr = FPR_FILER_MASK;
1024 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1025 priv->ftp_rqfpr[rqfar] = rqfpr;
1026 priv->ftp_rqfcr[rqfar] = rqfcr;
1027 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1030 rqfcr = RQFCR_CMP_NOMATCH;
1031 priv->ftp_rqfpr[rqfar] = rqfpr;
1032 priv->ftp_rqfcr[rqfar] = rqfcr;
1033 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1036 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1038 priv->ftp_rqfcr[rqfar] = rqfcr;
1039 priv->ftp_rqfpr[rqfar] = rqfpr;
1040 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1043 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1045 priv->ftp_rqfcr[rqfar] = rqfcr;
1046 priv->ftp_rqfpr[rqfar] = rqfpr;
1047 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1052 static void gfar_init_filer_table(struct gfar_private *priv)
1055 u32 rqfar = MAX_FILER_IDX;
1057 u32 rqfpr = FPR_FILER_MASK;
1060 rqfcr = RQFCR_CMP_MATCH;
1061 priv->ftp_rqfcr[rqfar] = rqfcr;
1062 priv->ftp_rqfpr[rqfar] = rqfpr;
1063 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1065 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1066 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1067 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1068 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1069 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1070 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1072 /* cur_filer_idx indicates the first non-masked rule */
1073 priv->cur_filer_idx = rqfar;
1075 /* Rest are masked rules */
1076 rqfcr = RQFCR_CMP_NOMATCH;
1077 for (i = 0; i < rqfar; i++) {
1078 priv->ftp_rqfcr[i] = rqfcr;
1079 priv->ftp_rqfpr[i] = rqfpr;
1080 gfar_write_filer(priv, i, rqfcr, rqfpr);
1085 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1087 unsigned int pvr = mfspr(SPRN_PVR);
1088 unsigned int svr = mfspr(SPRN_SVR);
1089 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1090 unsigned int rev = svr & 0xffff;
1092 /* MPC8313 Rev 2.0 and higher; All MPC837x */
1093 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1094 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1095 priv->errata |= GFAR_ERRATA_74;
1097 /* MPC8313 and MPC837x all rev */
1098 if ((pvr == 0x80850010 && mod == 0x80b0) ||
1099 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1100 priv->errata |= GFAR_ERRATA_76;
1102 /* MPC8313 Rev < 2.0 */
1103 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1104 priv->errata |= GFAR_ERRATA_12;
1107 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1109 unsigned int svr = mfspr(SPRN_SVR);
1111 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1112 priv->errata |= GFAR_ERRATA_12;
1113 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
1114 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1115 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
1116 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
1117 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1121 static void gfar_detect_errata(struct gfar_private *priv)
1123 struct device *dev = &priv->ofdev->dev;
1125 /* no plans to fix */
1126 priv->errata |= GFAR_ERRATA_A002;
1129 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1130 __gfar_detect_errata_85xx(priv);
1131 else /* non-mpc85xx parts, i.e. e300 core based */
1132 __gfar_detect_errata_83xx(priv);
1136 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1140 void gfar_mac_reset(struct gfar_private *priv)
1142 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1145 /* Reset MAC layer */
1146 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1148 /* We need to delay at least 3 TX clocks */
1151 /* the soft reset bit is not self-resetting, so we need to
1152 * clear it before resuming normal operation
1154 gfar_write(&regs->maccfg1, 0);
1158 gfar_rx_offload_en(priv);
1160 /* Initialize the max receive frame/buffer lengths */
1161 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
1162 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
1164 /* Initialize the Minimum Frame Length Register */
1165 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1167 /* Initialize MACCFG2. */
1168 tempval = MACCFG2_INIT_SETTINGS;
1170 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
1171 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
1172 * and by checking RxBD[LG] and discarding larger than MAXFRM.
1174 if (gfar_has_errata(priv, GFAR_ERRATA_74))
1175 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1177 gfar_write(&regs->maccfg2, tempval);
1179 /* Clear mac addr hash registers */
1180 gfar_write(&regs->igaddr0, 0);
1181 gfar_write(&regs->igaddr1, 0);
1182 gfar_write(&regs->igaddr2, 0);
1183 gfar_write(&regs->igaddr3, 0);
1184 gfar_write(&regs->igaddr4, 0);
1185 gfar_write(&regs->igaddr5, 0);
1186 gfar_write(&regs->igaddr6, 0);
1187 gfar_write(&regs->igaddr7, 0);
1189 gfar_write(&regs->gaddr0, 0);
1190 gfar_write(&regs->gaddr1, 0);
1191 gfar_write(&regs->gaddr2, 0);
1192 gfar_write(&regs->gaddr3, 0);
1193 gfar_write(&regs->gaddr4, 0);
1194 gfar_write(&regs->gaddr5, 0);
1195 gfar_write(&regs->gaddr6, 0);
1196 gfar_write(&regs->gaddr7, 0);
1198 if (priv->extended_hash)
1199 gfar_clear_exact_match(priv->ndev);
1201 gfar_mac_rx_config(priv);
1203 gfar_mac_tx_config(priv);
1205 gfar_set_mac_address(priv->ndev);
1207 gfar_set_multi(priv->ndev);
1209 /* clear ievent and imask before configuring coalescing */
1210 gfar_ints_disable(priv);
1212 /* Configure the coalescing support */
1213 gfar_configure_coalescing_all(priv);
1216 static void gfar_hw_init(struct gfar_private *priv)
1218 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1221 /* Stop the DMA engine now, in case it was running before
1222 * (The firmware could have used it, and left it running).
1226 gfar_mac_reset(priv);
1228 /* Zero out the rmon mib registers if it has them */
1229 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1230 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1232 /* Mask off the CAM interrupts */
1233 gfar_write(&regs->rmon.cam1, 0xffffffff);
1234 gfar_write(&regs->rmon.cam2, 0xffffffff);
1237 /* Initialize ECNTRL */
1238 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1240 /* Set the extraction length and index */
1241 attrs = ATTRELI_EL(priv->rx_stash_size) |
1242 ATTRELI_EI(priv->rx_stash_index);
1244 gfar_write(&regs->attreli, attrs);
1246 /* Start with defaults, and add stashing
1247 * depending on driver parameters
1249 attrs = ATTR_INIT_SETTINGS;
1251 if (priv->bd_stash_en)
1252 attrs |= ATTR_BDSTASH;
1254 if (priv->rx_stash_size != 0)
1255 attrs |= ATTR_BUFSTASH;
1257 gfar_write(&regs->attr, attrs);
1260 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1261 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1262 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1264 /* Program the interrupt steering regs, only for MG devices */
1265 if (priv->num_grps > 1)
1266 gfar_write_isrg(priv);
1269 static void gfar_init_addr_hash_table(struct gfar_private *priv)
1271 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1273 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1274 priv->extended_hash = 1;
1275 priv->hash_width = 9;
1277 priv->hash_regs[0] = &regs->igaddr0;
1278 priv->hash_regs[1] = &regs->igaddr1;
1279 priv->hash_regs[2] = &regs->igaddr2;
1280 priv->hash_regs[3] = &regs->igaddr3;
1281 priv->hash_regs[4] = &regs->igaddr4;
1282 priv->hash_regs[5] = &regs->igaddr5;
1283 priv->hash_regs[6] = &regs->igaddr6;
1284 priv->hash_regs[7] = &regs->igaddr7;
1285 priv->hash_regs[8] = &regs->gaddr0;
1286 priv->hash_regs[9] = &regs->gaddr1;
1287 priv->hash_regs[10] = &regs->gaddr2;
1288 priv->hash_regs[11] = &regs->gaddr3;
1289 priv->hash_regs[12] = &regs->gaddr4;
1290 priv->hash_regs[13] = &regs->gaddr5;
1291 priv->hash_regs[14] = &regs->gaddr6;
1292 priv->hash_regs[15] = &regs->gaddr7;
1295 priv->extended_hash = 0;
1296 priv->hash_width = 8;
1298 priv->hash_regs[0] = &regs->gaddr0;
1299 priv->hash_regs[1] = &regs->gaddr1;
1300 priv->hash_regs[2] = &regs->gaddr2;
1301 priv->hash_regs[3] = &regs->gaddr3;
1302 priv->hash_regs[4] = &regs->gaddr4;
1303 priv->hash_regs[5] = &regs->gaddr5;
1304 priv->hash_regs[6] = &regs->gaddr6;
1305 priv->hash_regs[7] = &regs->gaddr7;
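/* For reference: with the extended hash, the 8 igaddr plus 8 gaddr
 * registers above provide 16 x 32 = 512 hash bins (hence hash_width = 9);
 * the non-extended case uses only the 8 gaddr registers, i.e. 256 bins
 * (hash_width = 8).
 */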
1309 /* Set up the ethernet device structure, private data,
1310 * and anything else we need before we start
1312 static int gfar_probe(struct platform_device *ofdev)
1314 struct device_node *np = ofdev->dev.of_node;
1315 struct net_device *dev = NULL;
1316 struct gfar_private *priv = NULL;
1319 err = gfar_of_init(ofdev, &dev);
1324 priv = netdev_priv(dev);
1326 priv->ofdev = ofdev;
1327 priv->dev = &ofdev->dev;
1328 SET_NETDEV_DEV(dev, &ofdev->dev);
1330 INIT_WORK(&priv->reset_task, gfar_reset_task);
1332 platform_set_drvdata(ofdev, priv);
1334 gfar_detect_errata(priv);
1336 /* Set the dev->base_addr to the gfar reg region */
1337 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1339 /* Fill in the dev structure */
1340 dev->watchdog_timeo = TX_TIMEOUT;
1341 /* MTU range: 50 - 9586 */
1344 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
1345 dev->netdev_ops = &gfar_netdev_ops;
1346 dev->ethtool_ops = &gfar_ethtool_ops;
1348 /* Register for napi ...We are registering NAPI for each grp */
1349 for (i = 0; i < priv->num_grps; i++) {
1350 if (priv->poll_mode == GFAR_SQ_POLLING) {
1351 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1352 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1353 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
1354 gfar_poll_tx_sq, 2);
1356 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1357 gfar_poll_rx, GFAR_DEV_WEIGHT);
1358 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
1363 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1364 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1366 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1367 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1370 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1371 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1372 NETIF_F_HW_VLAN_CTAG_RX;
1373 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1376 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1378 gfar_init_addr_hash_table(priv);
1380 /* Insert receive time stamps into the padding alignment bytes, and
1381 * add 2 bytes of padding to ensure CPU alignment. */
1383 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1384 priv->padding = 8 + DEFAULT_PADDING;
1386 if (dev->features & NETIF_F_IP_CSUM ||
1387 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1388 dev->needed_headroom = GMAC_FCB_LEN;
1390 /* Initializing some of the rx/tx queue level parameters */
1391 for (i = 0; i < priv->num_tx_queues; i++) {
1392 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1393 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1394 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1395 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1398 for (i = 0; i < priv->num_rx_queues; i++) {
1399 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1400 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1401 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1404 /* Always enable rx filer if available */
1405 priv->rx_filer_enable =
1406 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
1407 /* Enable most messages by default */
1408 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1409 /* use priority h/w tx queue scheduling for single-queue devices */
1410 if (priv->num_tx_queues == 1)
1411 priv->prio_sched_en = 1;
1413 set_bit(GFAR_DOWN, &priv->state);
1417 /* Carrier starts down, phylib will bring it up */
1418 netif_carrier_off(dev);
1420 err = register_netdev(dev);
1423 pr_err("%s: Cannot register net device, aborting\n", dev->name);
1427 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1428 priv->wol_supported |= GFAR_WOL_MAGIC;
1430 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1431 priv->rx_filer_enable)
1432 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1434 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
1436 /* fill out IRQ number and name fields */
1437 for (i = 0; i < priv->num_grps; i++) {
1438 struct gfar_priv_grp *grp = &priv->gfargrp[i];
1439 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1440 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1441 dev->name, "_g", '0' + i, "_tx");
1442 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1443 dev->name, "_g", '0' + i, "_rx");
1444 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1445 dev->name, "_g", '0' + i, "_er");
1447 strcpy(gfar_irq(grp, TX)->name, dev->name);
1450 /* Initialize the filer table */
1451 gfar_init_filer_table(priv);
1453 /* Print out the device info */
1454 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1456 /* Even more device info helps when determining which kernel
1457 * provided which set of benchmarks.
1459 netdev_info(dev, "Running with NAPI enabled\n");
1460 for (i = 0; i < priv->num_rx_queues; i++)
1461 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1462 i, priv->rx_queue[i]->rx_ring_size);
1463 for (i = 0; i < priv->num_tx_queues; i++)
1464 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1465 i, priv->tx_queue[i]->tx_ring_size);
1470 if (of_phy_is_fixed_link(np))
1471 of_phy_deregister_fixed_link(np);
1472 unmap_group_regs(priv);
1473 gfar_free_rx_queues(priv);
1474 gfar_free_tx_queues(priv);
1475 of_node_put(priv->phy_node);
1476 of_node_put(priv->tbi_node);
1477 free_gfar_dev(priv);
1481 static int gfar_remove(struct platform_device *ofdev)
1483 struct gfar_private *priv = platform_get_drvdata(ofdev);
1484 struct device_node *np = ofdev->dev.of_node;
1486 of_node_put(priv->phy_node);
1487 of_node_put(priv->tbi_node);
1489 unregister_netdev(priv->ndev);
1491 if (of_phy_is_fixed_link(np))
1492 of_phy_deregister_fixed_link(np);
1494 unmap_group_regs(priv);
1495 gfar_free_rx_queues(priv);
1496 gfar_free_tx_queues(priv);
1497 free_gfar_dev(priv);
1504 static void __gfar_filer_disable(struct gfar_private *priv)
1506 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1509 temp = gfar_read(&regs->rctrl);
1510 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1511 gfar_write(&regs->rctrl, temp);
1514 static void __gfar_filer_enable(struct gfar_private *priv)
1516 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1519 temp = gfar_read(&regs->rctrl);
1520 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1521 gfar_write(&regs->rctrl, temp);
1524 /* Filer rules implementing wol capabilities */
1525 static void gfar_filer_config_wol(struct gfar_private *priv)
1530 __gfar_filer_disable(priv);
1532 /* clear the filer table, reject any packet by default */
1533 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1534 for (i = 0; i <= MAX_FILER_IDX; i++)
1535 gfar_write_filer(priv, i, rqfcr, 0);
1538 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1539 /* unicast packet, accept it */
1540 struct net_device *ndev = priv->ndev;
1541 /* get the default rx queue index */
1542 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1543 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1544 (ndev->dev_addr[1] << 8) |
1547 rqfcr = (qindex << 10) | RQFCR_AND |
1548 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1550 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1552 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1553 (ndev->dev_addr[4] << 8) |
1555 rqfcr = (qindex << 10) | RQFCR_GPI |
1556 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1557 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
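/* Example: for a MAC address of 00:04:9f:01:02:03, the DAH rule above
 * matches on 0x00049f and the DAL rule on 0x010203, i.e. the upper and
 * lower three bytes of the station address.
 */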
1560 __gfar_filer_enable(priv);
1563 static void gfar_filer_restore_table(struct gfar_private *priv)
1568 __gfar_filer_disable(priv);
1570 for (i = 0; i <= MAX_FILER_IDX; i++) {
1571 rqfcr = priv->ftp_rqfcr[i];
1572 rqfpr = priv->ftp_rqfpr[i];
1573 gfar_write_filer(priv, i, rqfcr, rqfpr);
1576 __gfar_filer_enable(priv);
1579 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1580 static void gfar_start_wol_filer(struct gfar_private *priv)
1582 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1586 /* Enable Rx hw queues */
1587 gfar_write(&regs->rqueue, priv->rqueue);
1589 /* Initialize DMACTRL to have WWR and WOP */
1590 tempval = gfar_read(&regs->dmactrl);
1591 tempval |= DMACTRL_INIT_SETTINGS;
1592 gfar_write(&regs->dmactrl, tempval);
1594 /* Make sure we aren't stopped */
1595 tempval = gfar_read(&regs->dmactrl);
1596 tempval &= ~DMACTRL_GRS;
1597 gfar_write(&regs->dmactrl, tempval);
1599 for (i = 0; i < priv->num_grps; i++) {
1600 regs = priv->gfargrp[i].regs;
1601 /* Clear RHLT, so that the DMA starts polling now */
1602 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1603 /* enable the Filer General Purpose Interrupt */
1604 gfar_write(&regs->imask, IMASK_FGPI);
1608 tempval = gfar_read(&regs->maccfg1);
1609 tempval |= MACCFG1_RX_EN;
1610 gfar_write(&regs->maccfg1, tempval);
1613 static int gfar_suspend(struct device *dev)
1615 struct gfar_private *priv = dev_get_drvdata(dev);
1616 struct net_device *ndev = priv->ndev;
1617 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1619 u16 wol = priv->wol_opts;
1621 if (!netif_running(ndev))
1625 netif_tx_lock(ndev);
1626 netif_device_detach(ndev);
1627 netif_tx_unlock(ndev);
1631 if (wol & GFAR_WOL_MAGIC) {
1632 /* Enable interrupt on Magic Packet */
1633 gfar_write(&regs->imask, IMASK_MAG);
1635 /* Enable Magic Packet mode */
1636 tempval = gfar_read(&regs->maccfg2);
1637 tempval |= MACCFG2_MPEN;
1638 gfar_write(&regs->maccfg2, tempval);
1640 /* re-enable the Rx block */
1641 tempval = gfar_read(&regs->maccfg1);
1642 tempval |= MACCFG1_RX_EN;
1643 gfar_write(&regs->maccfg1, tempval);
1645 } else if (wol & GFAR_WOL_FILER_UCAST) {
1646 gfar_filer_config_wol(priv);
1647 gfar_start_wol_filer(priv);
1650 phy_stop(ndev->phydev);
1656 static int gfar_resume(struct device *dev)
1658 struct gfar_private *priv = dev_get_drvdata(dev);
1659 struct net_device *ndev = priv->ndev;
1660 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1662 u16 wol = priv->wol_opts;
1664 if (!netif_running(ndev))
1667 if (wol & GFAR_WOL_MAGIC) {
1668 /* Disable Magic Packet mode */
1669 tempval = gfar_read(&regs->maccfg2);
1670 tempval &= ~MACCFG2_MPEN;
1671 gfar_write(&regs->maccfg2, tempval);
1673 } else if (wol & GFAR_WOL_FILER_UCAST) {
1674 /* need to stop rx only, tx is already down */
1676 gfar_filer_restore_table(priv);
1679 phy_start(ndev->phydev);
1684 netif_device_attach(ndev);
1690 static int gfar_restore(struct device *dev)
1692 struct gfar_private *priv = dev_get_drvdata(dev);
1693 struct net_device *ndev = priv->ndev;
1695 if (!netif_running(ndev)) {
1696 netif_device_attach(ndev);
1701 gfar_init_bds(ndev);
1703 gfar_mac_reset(priv);
1705 gfar_init_tx_rx_base(priv);
1711 priv->oldduplex = -1;
1714 phy_start(ndev->phydev);
1716 netif_device_attach(ndev);
1722 static const struct dev_pm_ops gfar_pm_ops = {
1723 .suspend = gfar_suspend,
1724 .resume = gfar_resume,
1725 .freeze = gfar_suspend,
1726 .thaw = gfar_resume,
1727 .restore = gfar_restore,
1730 #define GFAR_PM_OPS (&gfar_pm_ops)
1734 #define GFAR_PM_OPS NULL
1738 /* Reads the controller's registers to determine what interface
1739 * connects it to the PHY.
1741 static phy_interface_t gfar_get_interface(struct net_device *dev)
1743 struct gfar_private *priv = netdev_priv(dev);
1744 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1747 ecntrl = gfar_read(&regs->ecntrl);
1749 if (ecntrl & ECNTRL_SGMII_MODE)
1750 return PHY_INTERFACE_MODE_SGMII;
1752 if (ecntrl & ECNTRL_TBI_MODE) {
1753 if (ecntrl & ECNTRL_REDUCED_MODE)
1754 return PHY_INTERFACE_MODE_RTBI;
1756 return PHY_INTERFACE_MODE_TBI;
1759 if (ecntrl & ECNTRL_REDUCED_MODE) {
1760 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1761 return PHY_INTERFACE_MODE_RMII;
1764 phy_interface_t interface = priv->interface;
1766 /* This isn't autodetected right now, so it must
1767 * be set by the device tree or platform code.
1769 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1770 return PHY_INTERFACE_MODE_RGMII_ID;
1772 return PHY_INTERFACE_MODE_RGMII;
1776 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1777 return PHY_INTERFACE_MODE_GMII;
1779 return PHY_INTERFACE_MODE_MII;
1783 /* Initializes driver's PHY state, and attaches to the PHY.
1784 * Returns 0 on success.
1786 static int init_phy(struct net_device *dev)
1788 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1789 struct gfar_private *priv = netdev_priv(dev);
1790 phy_interface_t interface;
1791 struct phy_device *phydev;
1792 struct ethtool_eee edata;
1794 linkmode_set_bit_array(phy_10_100_features_array,
1795 ARRAY_SIZE(phy_10_100_features_array),
1797 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1798 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1799 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1800 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1804 priv->oldduplex = -1;
1806 interface = gfar_get_interface(dev);
1808 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1811 dev_err(&dev->dev, "could not attach to PHY\n");
1815 if (interface == PHY_INTERFACE_MODE_SGMII)
1816 gfar_configure_serdes(dev);
1818 /* Remove any features not supported by the controller */
1819 linkmode_and(phydev->supported, phydev->supported, mask);
1820 linkmode_copy(phydev->advertising, phydev->supported);
1822 /* Add support for flow control */
1823 phy_support_asym_pause(phydev);
1825 /* disable EEE autoneg, EEE not supported by eTSEC */
1826 memset(&edata, 0, sizeof(struct ethtool_eee));
1827 phy_ethtool_set_eee(phydev, &edata);
1832 /* Initialize TBI PHY interface for communicating with the
1833 * SERDES lynx PHY on the chip. We communicate with this PHY
1834 * through the MDIO bus on each controller, treating it as a
1835 * "normal" PHY at the address found in the TBIPA register. We assume
1836 * that the TBIPA register is valid. Either the MDIO bus code will set
1837 * it to a value that doesn't conflict with other PHYs on the bus, or the
1838 * value doesn't matter, as there are no other PHYs on the bus.
1840 static void gfar_configure_serdes(struct net_device *dev)
1842 struct gfar_private *priv = netdev_priv(dev);
1843 struct phy_device *tbiphy;
1845 if (!priv->tbi_node) {
1846 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1847 "device tree specify a tbi-handle\n");
1851 tbiphy = of_phy_find_device(priv->tbi_node);
1853 dev_err(&dev->dev, "error: Could not get TBI device\n");
1857 /* If the link is already up, we must already be ok, and don't need to
1858 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1859 * everything for us? Resetting it takes the link down and requires
1860 * several seconds for it to come back.
1862 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1863 put_device(&tbiphy->mdio.dev);
1867 /* Single clk mode, mii mode off (for serdes communication) */
1868 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1870 phy_write(tbiphy, MII_ADVERTISE,
1871 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1872 ADVERTISE_1000XPSE_ASYM);
1874 phy_write(tbiphy, MII_BMCR,
1875 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1878 put_device(&tbiphy->mdio.dev);
1881 static int __gfar_is_rx_idle(struct gfar_private *priv)
1885 /* Normally the TSEC should not hang on GRS commands, so we should
1886 * actually wait for the IEVENT_GRSC flag. */
1888 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1891 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1892 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1893 * and the Rx can be safely reset.
1895 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1897 if ((res & 0xffff) == (res >> 16))
1903 /* Halt the receive and transmit queues */
1904 static void gfar_halt_nodisable(struct gfar_private *priv)
1906 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1908 unsigned int timeout;
1911 gfar_ints_disable(priv);
1913 if (gfar_is_dma_stopped(priv))
1916 /* Stop the DMA, and wait for it to stop */
1917 tempval = gfar_read(&regs->dmactrl);
1918 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1919 gfar_write(&regs->dmactrl, tempval);
1923 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1929 stopped = gfar_is_dma_stopped(priv);
1931 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1932 !__gfar_is_rx_idle(priv))
1936 /* Halt the receive and transmit queues */
1937 void gfar_halt(struct gfar_private *priv)
1939 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1942 /* Disable the Rx/Tx hw queues */
1943 gfar_write(&regs->rqueue, 0);
1944 gfar_write(&regs->tqueue, 0);
1948 gfar_halt_nodisable(priv);
1950 /* Disable Rx/Tx DMA */
1951 tempval = gfar_read(&regs->maccfg1);
1952 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1953 gfar_write(&regs->maccfg1, tempval);
1956 void stop_gfar(struct net_device *dev)
1958 struct gfar_private *priv = netdev_priv(dev);
1960 netif_tx_stop_all_queues(dev);
1962 smp_mb__before_atomic();
1963 set_bit(GFAR_DOWN, &priv->state);
1964 smp_mb__after_atomic();
1968 /* disable ints and gracefully shut down Rx/Tx DMA */
1971 phy_stop(dev->phydev);
1973 free_skb_resources(priv);
1976 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1978 struct txbd8 *txbdp;
1979 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1982 txbdp = tx_queue->tx_bd_base;
1984 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1985 if (!tx_queue->tx_skbuff[i])
1988 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1989 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1991 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1994 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1995 be16_to_cpu(txbdp->length),
1999 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
2000 tx_queue->tx_skbuff[i] = NULL;
2002 kfree(tx_queue->tx_skbuff);
2003 tx_queue->tx_skbuff = NULL;
2006 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2010 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
2013 dev_kfree_skb(rx_queue->skb);
2015 for (i = 0; i < rx_queue->rx_ring_size; i++) {
2016 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
2025 dma_unmap_page(rx_queue->dev, rxb->dma,
2026 PAGE_SIZE, DMA_FROM_DEVICE);
2027 __free_page(rxb->page);
2032 kfree(rx_queue->rx_buff);
2033 rx_queue->rx_buff = NULL;
2036 /* If there are any tx skbs or rx skbs still around, free them.
2037 * Then free tx_skbuff and rx_skbuff
2039 static void free_skb_resources(struct gfar_private *priv)
2041 struct gfar_priv_tx_q *tx_queue = NULL;
2042 struct gfar_priv_rx_q *rx_queue = NULL;
2045 /* Go through all the buffer descriptors and free their data buffers */
2046 for (i = 0; i < priv->num_tx_queues; i++) {
2047 struct netdev_queue *txq;
2049 tx_queue = priv->tx_queue[i];
2050 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
2051 if (tx_queue->tx_skbuff)
2052 free_skb_tx_queue(tx_queue);
2053 netdev_tx_reset_queue(txq);
2056 for (i = 0; i < priv->num_rx_queues; i++) {
2057 rx_queue = priv->rx_queue[i];
2058 if (rx_queue->rx_buff)
2059 free_skb_rx_queue(rx_queue);
2062 dma_free_coherent(priv->dev,
2063 sizeof(struct txbd8) * priv->total_tx_ring_size +
2064 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2065 priv->tx_queue[0]->tx_bd_base,
2066 priv->tx_queue[0]->tx_bd_dma_base);
2069 void gfar_start(struct gfar_private *priv)
2071 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2075 /* Enable Rx/Tx hw queues */
2076 gfar_write(&regs->rqueue, priv->rqueue);
2077 gfar_write(&regs->tqueue, priv->tqueue);
2079 /* Initialize DMACTRL to have WWR and WOP */
2080 tempval = gfar_read(&regs->dmactrl);
2081 tempval |= DMACTRL_INIT_SETTINGS;
2082 gfar_write(&regs->dmactrl, tempval);
2084 /* Make sure we aren't stopped */
2085 tempval = gfar_read(&regs->dmactrl);
2086 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
2087 gfar_write(&regs->dmactrl, tempval);
2089 for (i = 0; i < priv->num_grps; i++) {
2090 regs = priv->gfargrp[i].regs;
2091 /* Clear THLT/RHLT, so that the DMA starts polling now */
2092 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2093 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
2096 /* Enable Rx/Tx DMA */
2097 tempval = gfar_read(&regs->maccfg1);
2098 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2099 gfar_write(&regs->maccfg1, tempval);
2101 gfar_ints_enable(priv);
2103 netif_trans_update(priv->ndev); /* prevent tx timeout */
2106 static void free_grp_irqs(struct gfar_priv_grp *grp)
2108 free_irq(gfar_irq(grp, TX)->irq, grp);
2109 free_irq(gfar_irq(grp, RX)->irq, grp);
2110 free_irq(gfar_irq(grp, ER)->irq, grp);
2113 static int register_grp_irqs(struct gfar_priv_grp *grp)
2115 struct gfar_private *priv = grp->priv;
2116 struct net_device *dev = priv->ndev;
2119 /* If the device has multiple interrupts, register for
2120 * them. Otherwise, only register for the one
2122 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2123 /* Install our interrupt handlers for Error,
2124 * Transmit, and Receive
2126 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2127 gfar_irq(grp, ER)->name, grp);
2129 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2130 gfar_irq(grp, ER)->irq);
2134 enable_irq_wake(gfar_irq(grp, ER)->irq);
2136 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2137 gfar_irq(grp, TX)->name, grp);
2139 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2140 gfar_irq(grp, TX)->irq);
2143 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2144 gfar_irq(grp, RX)->name, grp);
2146 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2147 gfar_irq(grp, RX)->irq);
2150 enable_irq_wake(gfar_irq(grp, RX)->irq);
2153 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2154 gfar_irq(grp, TX)->name, grp);
2156 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2157 gfar_irq(grp, TX)->irq);
2160 enable_irq_wake(gfar_irq(grp, TX)->irq);
2166 free_irq(gfar_irq(grp, TX)->irq, grp);
2168 free_irq(gfar_irq(grp, ER)->irq, grp);
2174 static void gfar_free_irq(struct gfar_private *priv)
2179 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2180 for (i = 0; i < priv->num_grps; i++)
2181 free_grp_irqs(&priv->gfargrp[i]);
2183 for (i = 0; i < priv->num_grps; i++)
2184 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2189 static int gfar_request_irq(struct gfar_private *priv)
2193 for (i = 0; i < priv->num_grps; i++) {
2194 err = register_grp_irqs(&priv->gfargrp[i]);
2196 for (j = 0; j < i; j++)
2197 free_grp_irqs(&priv->gfargrp[j]);
2205 /* Bring the controller up and running */
2206 int startup_gfar(struct net_device *ndev)
2208 struct gfar_private *priv = netdev_priv(ndev);
2211 gfar_mac_reset(priv);
2213 err = gfar_alloc_skb_resources(ndev);
2217 gfar_init_tx_rx_base(priv);
2219 smp_mb__before_atomic();
2220 clear_bit(GFAR_DOWN, &priv->state);
2221 smp_mb__after_atomic();
2223 /* Start Rx/Tx DMA and enable the interrupts */
2226 /* force link state update after mac reset */
2229 priv->oldduplex = -1;
2231 phy_start(ndev->phydev);
2235 netif_tx_wake_all_queues(ndev);
2240 /* Called when something needs to use the ethernet device
2241 * Returns 0 for success.
2243 static int gfar_enet_open(struct net_device *dev)
2245 struct gfar_private *priv = netdev_priv(dev);
2248 err = init_phy(dev);
2252 err = gfar_request_irq(priv);
2256 err = startup_gfar(dev);
2263 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2265 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
2267 memset(fcb, 0, GMAC_FCB_LEN);
2272 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2275 /* If we're here, it's an IP packet with a TCP or UDP
2276 * payload. We set it to checksum, using a pseudo-header
2279 u8 flags = TXFCB_DEFAULT;
2281 /* Tell the controller what the protocol is
2282 * And provide the already calculated phcs
2284 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2286 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
2288 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
2290 /* l3os is the distance between the start of the
2291 * frame (skb->data) and the start of the IP hdr.
2292 * l4os is the distance between the start of the
2293 * l3 hdr and the l4 hdr
2295 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
2296 fcb->l4os = skb_network_header_len(skb);
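/* Editorial worked example (assumed values, not from the original source):
 * for an untagged Ethernet/IPv4/TCP frame with the FCB already pushed,
 * skb_network_offset() equals fcb_length + ETH_HLEN, so:
 *
 *	fcb->l3os == ETH_HLEN == 14
 *	fcb->l4os == 20		(IPv4 header with no options)
 */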
2301 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2303 fcb->flags |= TXFCB_VLN;
2304 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
2307 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2308 struct txbd8 *base, int ring_size)
2310 struct txbd8 *new_bd = bdp + stride;
2312 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2315 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2318 return skip_txbd(bdp, 1, base, ring_size);
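/* Editorial sketch (illustrative only): skip_txbd() advances modulo the
 * ring size. With ring_size == 8:
 *
 *	bdp = &base[6];
 *	bdp = skip_txbd(bdp, 3, base, 8);	=> &base[1], wrapping past the end
 *	bdp = next_txbd(bdp, base, 8);		=> &base[2]
 */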
2321 /* eTSEC12: csum generation not supported for some fcb offsets */
2322 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2323 unsigned long fcb_addr)
2325 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2326 (fcb_addr % 0x20) > 0x18);
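/* Editorial note (inferred from the check above): only an FCB starting in
 * the last 7 bytes of a 32-byte block is affected, e.g. a DMA offset of
 * 0x59 (0x59 % 0x20 == 0x19 > 0x18) needs the workaround, while 0x58
 * does not.
 */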
2329 /* eTSEC76: csum generation for frames larger than 2500 may
2330 * cause excess delays before start of transmission
2332 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2335 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2339 /* This is called by the kernel when a frame is ready for transmission.
2340 * It is pointed to by the dev->hard_start_xmit function pointer
2342 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2344 struct gfar_private *priv = netdev_priv(dev);
2345 struct gfar_priv_tx_q *tx_queue = NULL;
2346 struct netdev_queue *txq;
2347 struct gfar __iomem *regs = NULL;
2348 struct txfcb *fcb = NULL;
2349 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2353 int do_tstamp, do_csum, do_vlan;
2355 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2357 rq = skb->queue_mapping;
2358 tx_queue = priv->tx_queue[rq];
2359 txq = netdev_get_tx_queue(dev, rq);
2360 base = tx_queue->tx_bd_base;
2361 regs = tx_queue->grp->regs;
2363 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2364 do_vlan = skb_vlan_tag_present(skb);
2365 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2368 if (do_csum || do_vlan)
2369 fcb_len = GMAC_FCB_LEN;
2371 /* check if time stamp should be generated */
2372 if (unlikely(do_tstamp))
2373 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2375 /* make space for additional header when fcb is needed */
2376 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2377 struct sk_buff *skb_new;
2379 skb_new = skb_realloc_headroom(skb, fcb_len);
2381 dev->stats.tx_errors++;
2382 dev_kfree_skb_any(skb);
2383 return NETDEV_TX_OK;
2387 skb_set_owner_w(skb_new, skb->sk);
2388 dev_consume_skb_any(skb);
2392 /* total number of fragments in the SKB */
2393 nr_frags = skb_shinfo(skb)->nr_frags;
2395 /* calculate the required number of TxBDs for this skb */
2396 if (unlikely(do_tstamp))
2397 nr_txbds = nr_frags + 2;
2399 nr_txbds = nr_frags + 1;
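/* Editorial example (not from the original source): a linear skb needs one
 * TxBD, an skb with three page fragments needs four, and either case needs
 * one extra TxBD when a hardware Tx timestamp was requested.
 */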
2401 /* check if there is space to queue this packet */
2402 if (nr_txbds > tx_queue->num_txbdfree) {
2403 /* no space, stop the queue */
2404 netif_tx_stop_queue(txq);
2405 dev->stats.tx_fifo_errors++;
2406 return NETDEV_TX_BUSY;
2409 /* Update transmit stats */
2410 bytes_sent = skb->len;
2411 tx_queue->stats.tx_bytes += bytes_sent;
2412 /* keep Tx bytes on wire for BQL accounting */
2413 GFAR_CB(skb)->bytes_sent = bytes_sent;
2414 tx_queue->stats.tx_packets++;
2416 txbdp = txbdp_start = tx_queue->cur_tx;
2417 lstatus = be32_to_cpu(txbdp->lstatus);
2419 /* Add TxPAL between FCB and frame if required */
2420 if (unlikely(do_tstamp)) {
2421 skb_push(skb, GMAC_TXPAL_LEN);
2422 memset(skb->data, 0, GMAC_TXPAL_LEN);
2425 /* Add TxFCB if required */
2427 fcb = gfar_add_fcb(skb);
2428 lstatus |= BD_LFLAG(TXBD_TOE);
2431 /* Set up checksumming */
2433 gfar_tx_checksum(skb, fcb, fcb_len);
2435 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2436 unlikely(gfar_csum_errata_76(priv, skb->len))) {
2437 __skb_pull(skb, GMAC_FCB_LEN);
2438 skb_checksum_help(skb);
2439 if (do_vlan || do_tstamp) {
2440 /* put back a new fcb for vlan/tstamp TOE */
2441 fcb = gfar_add_fcb(skb);
2443 /* Tx TOE not used */
2444 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2451 gfar_tx_vlan(skb, fcb);
2453 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2455 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2458 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
2460 /* Time stamp insertion requires one additional TxBD */
2461 if (unlikely(do_tstamp))
2462 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2463 tx_queue->tx_ring_size);
2465 if (likely(!nr_frags)) {
2466 if (likely(!do_tstamp))
2467 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2469 u32 lstatus_start = lstatus;
2471 /* Place the fragment addresses and lengths into the TxBDs */
2472 frag = &skb_shinfo(skb)->frags[0];
2473 for (i = 0; i < nr_frags; i++, frag++) {
2476 /* Point at the next BD, wrapping as needed */
2477 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2479 size = skb_frag_size(frag);
2481 lstatus = be32_to_cpu(txbdp->lstatus) | size |
2482 BD_LFLAG(TXBD_READY);
2484 /* Handle the last BD specially */
2485 if (i == nr_frags - 1)
2486 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2488 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
2489 size, DMA_TO_DEVICE);
2490 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2493 /* set the TxBD length and buffer pointer */
2494 txbdp->bufPtr = cpu_to_be32(bufaddr);
2495 txbdp->lstatus = cpu_to_be32(lstatus);
2498 lstatus = lstatus_start;
2501 /* If time stamping is requested one additional TxBD must be set up. The
2502 * first TxBD points to the FCB and must have a data length of
2503 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2504 * the full frame length.
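/* Editorial sketch of the resulting chain (illustrative only):
 *
 *	BD[n]	-> FCB (and TxPAL), length GMAC_FCB_LEN
 *	BD[n+1]	-> frame headers/data, length skb_headlen(skb) - fcb_len
 *	BD[n+2..] -> page fragments, if any
 */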
2506 if (unlikely(do_tstamp)) {
2507 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2509 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2512 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2513 (skb_headlen(skb) - fcb_len);
2515 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2517 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2518 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2519 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2521 /* Setup tx hardware time stamping */
2522 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2525 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2528 netdev_tx_sent_queue(txq, bytes_sent);
2532 txbdp_start->lstatus = cpu_to_be32(lstatus);
2534 gfar_wmb(); /* force lstatus write before tx_skbuff */
2536 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2538 /* Update the current skb pointer to the next entry we will use
2539 * (wrapping if necessary)
2541 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2542 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
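/* Editorial note (assumes a power-of-two ring size, which TX_RING_MOD_MASK
 * implies): with tx_ring_size == 256 the mask is 0xff, so index 255 + 1
 * wraps back to 0 without a conditional.
 */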
2544 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2546 /* We can work in parallel with gfar_clean_tx_ring(), except
2547 * when modifying num_txbdfree. Note that we didn't grab the lock
2548 * when we were reading the num_txbdfree and checking for available
2549 * space; that's because outside of this function it can only grow.
2551 spin_lock_bh(&tx_queue->txlock);
2552 /* reduce TxBD free count */
2553 tx_queue->num_txbdfree -= (nr_txbds);
2554 spin_unlock_bh(&tx_queue->txlock);
2556 /* If the next BD still needs to be cleaned up, then the bds
2557 * are full. We need to tell the kernel to stop sending us stuff.
2559 if (!tx_queue->num_txbdfree) {
2560 netif_tx_stop_queue(txq);
2562 dev->stats.tx_fifo_errors++;
2565 /* Tell the DMA to go go go */
2566 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2568 return NETDEV_TX_OK;
2571 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2573 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2574 for (i = 0; i < nr_frags; i++) {
2575 lstatus = be32_to_cpu(txbdp->lstatus);
2576 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2579 lstatus &= ~BD_LFLAG(TXBD_READY);
2580 txbdp->lstatus = cpu_to_be32(lstatus);
2581 bufaddr = be32_to_cpu(txbdp->bufPtr);
2582 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2584 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2587 dev_kfree_skb_any(skb);
2588 return NETDEV_TX_OK;
2591 /* Stops the kernel queue, and halts the controller */
2592 static int gfar_close(struct net_device *dev)
2594 struct gfar_private *priv = netdev_priv(dev);
2596 cancel_work_sync(&priv->reset_task);
2599 /* Disconnect from the PHY */
2600 phy_disconnect(dev->phydev);
2602 gfar_free_irq(priv);
2607 /* Changes the mac address if the controller is not running. */
2608 static int gfar_set_mac_address(struct net_device *dev)
2610 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2615 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2617 struct gfar_private *priv = netdev_priv(dev);
2619 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2622 if (dev->flags & IFF_UP)
2627 if (dev->flags & IFF_UP)
2630 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2635 void reset_gfar(struct net_device *ndev)
2637 struct gfar_private *priv = netdev_priv(ndev);
2639 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2645 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2648 /* gfar_reset_task gets scheduled when a packet has not been
2649 * transmitted after a set amount of time.
2650 * For now, assume that clearing out all the structures, and
2651 * starting over will fix the problem.
2653 static void gfar_reset_task(struct work_struct *work)
2655 struct gfar_private *priv = container_of(work, struct gfar_private,
2657 reset_gfar(priv->ndev);
2660 static void gfar_timeout(struct net_device *dev)
2662 struct gfar_private *priv = netdev_priv(dev);
2664 dev->stats.tx_errors++;
2665 schedule_work(&priv->reset_task);
2668 /* Transmit completion handler -- reclaims sent buffers, called from the Tx NAPI poll */
2669 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2671 struct net_device *dev = tx_queue->dev;
2672 struct netdev_queue *txq;
2673 struct gfar_private *priv = netdev_priv(dev);
2674 struct txbd8 *bdp, *next = NULL;
2675 struct txbd8 *lbdp = NULL;
2676 struct txbd8 *base = tx_queue->tx_bd_base;
2677 struct sk_buff *skb;
2679 int tx_ring_size = tx_queue->tx_ring_size;
2680 int frags = 0, nr_txbds = 0;
2683 int tqi = tx_queue->qindex;
2684 unsigned int bytes_sent = 0;
2688 txq = netdev_get_tx_queue(dev, tqi);
2689 bdp = tx_queue->dirty_tx;
2690 skb_dirtytx = tx_queue->skb_dirtytx;
2692 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2694 frags = skb_shinfo(skb)->nr_frags;
2696 /* When time stamping, one additional TxBD must be freed.
2697 * Also, we need to dma_unmap_single() the TxPAL.
2699 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2700 nr_txbds = frags + 2;
2702 nr_txbds = frags + 1;
2704 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2706 lstatus = be32_to_cpu(lbdp->lstatus);
2708 /* Only clean completed frames */
2709 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2710 (lstatus & BD_LENGTH_MASK))
2713 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2714 next = next_txbd(bdp, base, tx_ring_size);
2715 buflen = be16_to_cpu(next->length) +
2716 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2718 buflen = be16_to_cpu(bdp->length);
2720 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2721 buflen, DMA_TO_DEVICE);
2723 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2724 struct skb_shared_hwtstamps shhwtstamps;
2725 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2728 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2729 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2730 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2731 skb_tstamp_tx(skb, &shhwtstamps);
2732 gfar_clear_txbd_status(bdp);
2736 gfar_clear_txbd_status(bdp);
2737 bdp = next_txbd(bdp, base, tx_ring_size);
2739 for (i = 0; i < frags; i++) {
2740 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2741 be16_to_cpu(bdp->length),
2743 gfar_clear_txbd_status(bdp);
2744 bdp = next_txbd(bdp, base, tx_ring_size);
2747 bytes_sent += GFAR_CB(skb)->bytes_sent;
2749 dev_kfree_skb_any(skb);
2751 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2753 skb_dirtytx = (skb_dirtytx + 1) &
2754 TX_RING_MOD_MASK(tx_ring_size);
2757 spin_lock(&tx_queue->txlock);
2758 tx_queue->num_txbdfree += nr_txbds;
2759 spin_unlock(&tx_queue->txlock);
2762 /* If we freed a buffer, we can restart transmission, if necessary */
2763 if (tx_queue->num_txbdfree &&
2764 netif_tx_queue_stopped(txq) &&
2765 !(test_bit(GFAR_DOWN, &priv->state)))
2766 netif_wake_subqueue(priv->ndev, tqi);
2768 /* Update dirty indicators */
2769 tx_queue->skb_dirtytx = skb_dirtytx;
2770 tx_queue->dirty_tx = bdp;
2772 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2775 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
2780 page = dev_alloc_page();
2781 if (unlikely(!page))
2784 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
2785 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
2793 rxb->page_offset = 0;
2798 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
2800 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
2801 struct gfar_extra_stats *estats = &priv->extra_stats;
2803 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
2804 atomic64_inc(&estats->rx_alloc_err);
2807 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
2811 struct gfar_rx_buff *rxb;
2814 i = rx_queue->next_to_use;
2815 bdp = &rx_queue->rx_bd_base[i];
2816 rxb = &rx_queue->rx_buff[i];
2818 while (alloc_cnt--) {
2819 /* try reuse page */
2820 if (unlikely(!rxb->page)) {
2821 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
2822 gfar_rx_alloc_err(rx_queue);
2827 /* Setup the new RxBD */
2828 gfar_init_rxbdp(rx_queue, bdp,
2829 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
2831 /* Update to the next pointer */
2835 if (unlikely(++i == rx_queue->rx_ring_size)) {
2837 bdp = rx_queue->rx_bd_base;
2838 rxb = rx_queue->rx_buff;
2842 rx_queue->next_to_use = i;
2843 rx_queue->next_to_alloc = i;
2846 static void count_errors(u32 lstatus, struct net_device *ndev)
2848 struct gfar_private *priv = netdev_priv(ndev);
2849 struct net_device_stats *stats = &ndev->stats;
2850 struct gfar_extra_stats *estats = &priv->extra_stats;
2852 /* If the packet was truncated, none of the other errors matter */
2853 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2854 stats->rx_length_errors++;
2856 atomic64_inc(&estats->rx_trunc);
2860 /* Count the errors, if there were any */
2861 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2862 stats->rx_length_errors++;
2864 if (lstatus & BD_LFLAG(RXBD_LARGE))
2865 atomic64_inc(&estats->rx_large);
2867 atomic64_inc(&estats->rx_short);
2869 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2870 stats->rx_frame_errors++;
2871 atomic64_inc(&estats->rx_nonoctet);
2873 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2874 atomic64_inc(&estats->rx_crcerr);
2875 stats->rx_crc_errors++;
2877 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2878 atomic64_inc(&estats->rx_overrun);
2879 stats->rx_over_errors++;
2883 irqreturn_t gfar_receive(int irq, void *grp_id)
2885 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2886 unsigned long flags;
2889 ievent = gfar_read(&grp->regs->ievent);
2891 if (unlikely(ievent & IEVENT_FGPI)) {
2892 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2896 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2897 spin_lock_irqsave(&grp->grplock, flags);
2898 imask = gfar_read(&grp->regs->imask);
2899 imask &= IMASK_RX_DISABLED;
2900 gfar_write(&grp->regs->imask, imask);
2901 spin_unlock_irqrestore(&grp->grplock, flags);
2902 __napi_schedule(&grp->napi_rx);
2904 /* Clear IEVENT, so interrupts aren't called again
2905 * because of the packets that have already arrived.
2907 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2913 /* Interrupt Handler for Transmit complete */
2914 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2916 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2917 unsigned long flags;
2920 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2921 spin_lock_irqsave(&grp->grplock, flags);
2922 imask = gfar_read(&grp->regs->imask);
2923 imask &= IMASK_TX_DISABLED;
2924 gfar_write(&grp->regs->imask, imask);
2925 spin_unlock_irqrestore(&grp->grplock, flags);
2926 __napi_schedule(&grp->napi_tx);
2928 /* Clear IEVENT, so interrupts aren't called again
2929 * because of the packets that have already arrived.
2931 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2937 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2938 struct sk_buff *skb, bool first)
2940 int size = lstatus & BD_LENGTH_MASK;
2941 struct page *page = rxb->page;
2943 if (likely(first)) {
2946 /* the last fragment's length contains the full frame length */
2947 if (lstatus & BD_LFLAG(RXBD_LAST))
2950 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2951 rxb->page_offset + RXBUF_ALIGNMENT,
2952 size, GFAR_RXB_TRUESIZE);
2955 /* try reuse page */
2956 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2959 /* change offset to the other half */
2960 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
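/* Editorial example (assuming GFAR_RXB_TRUESIZE is half of a 4 KiB page,
 * i.e. 2048): page_offset alternates between 0 and 2048, so the two halves
 * of one page are handed to hardware in turn while the other half may still
 * be referenced by an skb.
 */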
2967 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2968 struct gfar_rx_buff *old_rxb)
2970 struct gfar_rx_buff *new_rxb;
2971 u16 nta = rxq->next_to_alloc;
2973 new_rxb = &rxq->rx_buff[nta];
2975 /* find next buf that can reuse a page */
2977 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2979 /* copy page reference */
2980 *new_rxb = *old_rxb;
2982 /* sync for use by the device */
2983 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2984 old_rxb->page_offset,
2985 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2988 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2989 u32 lstatus, struct sk_buff *skb)
2991 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2992 struct page *page = rxb->page;
2996 void *buff_addr = page_address(page) + rxb->page_offset;
2998 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2999 if (unlikely(!skb)) {
3000 gfar_rx_alloc_err(rx_queue);
3003 skb_reserve(skb, RXBUF_ALIGNMENT);
3007 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
3008 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
3010 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
3011 /* reuse the free half of the page */
3012 gfar_reuse_rx_page(rx_queue, rxb);
3014 /* page cannot be reused, unmap it */
3015 dma_unmap_page(rx_queue->dev, rxb->dma,
3016 PAGE_SIZE, DMA_FROM_DEVICE);
3019 /* clear rxb content */
3025 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
3027 /* If valid headers were found, and valid sums
3028 * were verified, then we tell the kernel that no
3029 * checksumming is necessary. Otherwise, the checksum is left for the stack to verify.
3031 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
3032 (RXFCB_CIP | RXFCB_CTU))
3033 skb->ip_summed = CHECKSUM_UNNECESSARY;
3035 skb_checksum_none_assert(skb);
3038 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
3039 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
3041 struct gfar_private *priv = netdev_priv(ndev);
3042 struct rxfcb *fcb = NULL;
3044 /* the FCB, if present, sits at the very beginning of the buffer */
3045 fcb = (struct rxfcb *)skb->data;
3047 /* Remove the FCB from the skb
3048 * Remove the padded bytes, if there are any
3050 if (priv->uses_rxfcb)
3051 skb_pull(skb, GMAC_FCB_LEN);
3053 /* Get receive timestamp from the skb */
3054 if (priv->hwts_rx_en) {
3055 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3056 u64 *ns = (u64 *) skb->data;
3058 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3059 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
3063 skb_pull(skb, priv->padding);
3065 /* Trim off the FCS */
3066 pskb_trim(skb, skb->len - ETH_FCS_LEN);
3068 if (ndev->features & NETIF_F_RXCSUM)
3069 gfar_rx_checksum(skb, fcb);
3071 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here:
3072 * even if vlan rx accel is disabled, on some chips
3073 * RXFCB_VLN is pseudo-randomly set.
3075 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
3076 be16_to_cpu(fcb->flags) & RXFCB_VLN)
3077 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3078 be16_to_cpu(fcb->vlctl));
3081 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
3082 * until the budget/quota has been reached. Returns the number of frames handled.
3085 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
3087 struct net_device *ndev = rx_queue->ndev;
3088 struct gfar_private *priv = netdev_priv(ndev);
3091 struct sk_buff *skb = rx_queue->skb;
3092 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
3093 unsigned int total_bytes = 0, total_pkts = 0;
3095 /* Get the first full descriptor */
3096 i = rx_queue->next_to_clean;
3098 while (rx_work_limit--) {
3101 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
3102 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3106 bdp = &rx_queue->rx_bd_base[i];
3107 lstatus = be32_to_cpu(bdp->lstatus);
3108 if (lstatus & BD_LFLAG(RXBD_EMPTY))
3111 /* order rx buffer descriptor reads */
3114 /* fetch next to clean buffer from the ring */
3115 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
3122 if (unlikely(++i == rx_queue->rx_ring_size))
3125 rx_queue->next_to_clean = i;
3127 /* fetch next buffer if not the last in frame */
3128 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
3131 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
3132 count_errors(lstatus, ndev);
3134 /* discard faulty buffer */
3137 rx_queue->stats.rx_dropped++;
3141 gfar_process_frame(ndev, skb);
3143 /* Increment the number of packets */
3145 total_bytes += skb->len;
3147 skb_record_rx_queue(skb, rx_queue->qindex);
3149 skb->protocol = eth_type_trans(skb, ndev);
3151 /* Send the packet up the stack */
3152 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
3157 /* Store incomplete frames for completion */
3158 rx_queue->skb = skb;
3160 rx_queue->stats.rx_packets += total_pkts;
3161 rx_queue->stats.rx_bytes += total_bytes;
3164 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3166 /* Update Last Free RxBD pointer for LFC */
3167 if (unlikely(priv->tx_actual_en)) {
3168 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3170 gfar_write(rx_queue->rfbptr, bdp_dma);
3176 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
3178 struct gfar_priv_grp *gfargrp =
3179 container_of(napi, struct gfar_priv_grp, napi_rx);
3180 struct gfar __iomem *regs = gfargrp->regs;
3181 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
3184 /* Clear IEVENT, so interrupts aren't called again
3185 * because of the packets that have already arrived
3187 gfar_write(&regs->ievent, IEVENT_RX_MASK);
3189 work_done = gfar_clean_rx_ring(rx_queue, budget);
3191 if (work_done < budget) {
3193 napi_complete_done(napi, work_done);
3194 /* Clear the halt bit in RSTAT */
3195 gfar_write(&regs->rstat, gfargrp->rstat);
3197 spin_lock_irq(&gfargrp->grplock);
3198 imask = gfar_read(&regs->imask);
3199 imask |= IMASK_RX_DEFAULT;
3200 gfar_write(&regs->imask, imask);
3201 spin_unlock_irq(&gfargrp->grplock);
3207 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
3209 struct gfar_priv_grp *gfargrp =
3210 container_of(napi, struct gfar_priv_grp, napi_tx);
3211 struct gfar __iomem *regs = gfargrp->regs;
3212 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
3215 /* Clear IEVENT, so interrupts aren't called again
3216 * because of the packets that have already arrived
3218 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3220 /* run Tx cleanup to completion */
3221 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3222 gfar_clean_tx_ring(tx_queue);
3224 napi_complete(napi);
3226 spin_lock_irq(&gfargrp->grplock);
3227 imask = gfar_read(&regs->imask);
3228 imask |= IMASK_TX_DEFAULT;
3229 gfar_write(&regs->imask, imask);
3230 spin_unlock_irq(&gfargrp->grplock);
3235 static int gfar_poll_rx(struct napi_struct *napi, int budget)
3237 struct gfar_priv_grp *gfargrp =
3238 container_of(napi, struct gfar_priv_grp, napi_rx);
3239 struct gfar_private *priv = gfargrp->priv;
3240 struct gfar __iomem *regs = gfargrp->regs;
3241 struct gfar_priv_rx_q *rx_queue = NULL;
3242 int work_done = 0, work_done_per_q = 0;
3243 int i, budget_per_q = 0;
3244 unsigned long rstat_rxf;
3247 /* Clear IEVENT, so interrupts aren't called again
3248 * because of the packets that have already arrived
3250 gfar_write(&regs->ievent, IEVENT_RX_MASK);
3252 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3254 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3256 budget_per_q = budget/num_act_queues;
3258 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3259 /* skip queue if not active */
3260 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3263 rx_queue = priv->rx_queue[i];
3265 gfar_clean_rx_ring(rx_queue, budget_per_q);
3266 work_done += work_done_per_q;
3268 /* finished processing this queue */
3269 if (work_done_per_q < budget_per_q) {
3270 /* clear active queue hw indication */
3271 gfar_write(&regs->rstat,
3272 RSTAT_CLEAR_RXF0 >> i);
3275 if (!num_act_queues)
3280 if (!num_act_queues) {
3282 napi_complete_done(napi, work_done);
3284 /* Clear the halt bit in RSTAT */
3285 gfar_write(&regs->rstat, gfargrp->rstat);
3287 spin_lock_irq(&gfargrp->grplock);
3288 imask = gfar_read(&regs->imask);
3289 imask |= IMASK_RX_DEFAULT;
3290 gfar_write(&regs->imask, imask);
3291 spin_unlock_irq(&gfargrp->grplock);
3297 static int gfar_poll_tx(struct napi_struct *napi, int budget)
3299 struct gfar_priv_grp *gfargrp =
3300 container_of(napi, struct gfar_priv_grp, napi_tx);
3301 struct gfar_private *priv = gfargrp->priv;
3302 struct gfar __iomem *regs = gfargrp->regs;
3303 struct gfar_priv_tx_q *tx_queue = NULL;
3304 int has_tx_work = 0;
3307 /* Clear IEVENT, so interrupts aren't called again
3308 * because of the packets that have already arrived
3310 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3312 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3313 tx_queue = priv->tx_queue[i];
3314 /* run Tx cleanup to completion */
3315 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3316 gfar_clean_tx_ring(tx_queue);
3323 napi_complete(napi);
3325 spin_lock_irq(&gfargrp->grplock);
3326 imask = gfar_read(&regs->imask);
3327 imask |= IMASK_TX_DEFAULT;
3328 gfar_write(&regs->imask, imask);
3329 spin_unlock_irq(&gfargrp->grplock);
3336 #ifdef CONFIG_NET_POLL_CONTROLLER
3337 /* Polling 'interrupt' - used by things like netconsole to send skbs
3338 * without having to re-enable interrupts. It's not called while
3339 * the interrupt routine is executing.
3341 static void gfar_netpoll(struct net_device *dev)
3343 struct gfar_private *priv = netdev_priv(dev);
3346 /* If the device has multiple interrupts, run tx/rx */
3347 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3348 for (i = 0; i < priv->num_grps; i++) {
3349 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3351 disable_irq(gfar_irq(grp, TX)->irq);
3352 disable_irq(gfar_irq(grp, RX)->irq);
3353 disable_irq(gfar_irq(grp, ER)->irq);
3354 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3355 enable_irq(gfar_irq(grp, ER)->irq);
3356 enable_irq(gfar_irq(grp, RX)->irq);
3357 enable_irq(gfar_irq(grp, TX)->irq);
3360 for (i = 0; i < priv->num_grps; i++) {
3361 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3363 disable_irq(gfar_irq(grp, TX)->irq);
3364 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3365 enable_irq(gfar_irq(grp, TX)->irq);
3371 /* The interrupt handler for devices with one interrupt */
3372 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3374 struct gfar_priv_grp *gfargrp = grp_id;
3376 /* Save ievent for future reference */
3377 u32 events = gfar_read(&gfargrp->regs->ievent);
3379 /* Check for reception */
3380 if (events & IEVENT_RX_MASK)
3381 gfar_receive(irq, grp_id);
3383 /* Check for transmit completion */
3384 if (events & IEVENT_TX_MASK)
3385 gfar_transmit(irq, grp_id);
3387 /* Check for errors */
3388 if (events & IEVENT_ERR_MASK)
3389 gfar_error(irq, grp_id);
3394 /* Called every time the controller might need to be made
3395 * aware of new link state. The PHY code conveys this
3396 * information through variables in the phydev structure, and this
3397 * function converts those variables into the appropriate
3398 * register values, and can bring down the device if needed.
3400 static void adjust_link(struct net_device *dev)
3402 struct gfar_private *priv = netdev_priv(dev);
3403 struct phy_device *phydev = dev->phydev;
3405 if (unlikely(phydev->link != priv->oldlink ||
3406 (phydev->link && (phydev->duplex != priv->oldduplex ||
3407 phydev->speed != priv->oldspeed))))
3408 gfar_update_link_state(priv);
3411 /* Update the hash table based on the current list of multicast
3412 * addresses we subscribe to. Also, change the promiscuity of
3413 * the device based on the flags (this function is called
3414 * whenever dev->flags is changed)
3416 static void gfar_set_multi(struct net_device *dev)
3418 struct netdev_hw_addr *ha;
3419 struct gfar_private *priv = netdev_priv(dev);
3420 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3423 if (dev->flags & IFF_PROMISC) {
3424 /* Set RCTRL to PROM */
3425 tempval = gfar_read(&regs->rctrl);
3426 tempval |= RCTRL_PROM;
3427 gfar_write(&regs->rctrl, tempval);
3429 /* Set RCTRL to not PROM */
3430 tempval = gfar_read(&regs->rctrl);
3431 tempval &= ~(RCTRL_PROM);
3432 gfar_write(&regs->rctrl, tempval);
3435 if (dev->flags & IFF_ALLMULTI) {
3436 /* Set the hash to rx all multicast frames */
3437 gfar_write(&regs->igaddr0, 0xffffffff);
3438 gfar_write(&regs->igaddr1, 0xffffffff);
3439 gfar_write(&regs->igaddr2, 0xffffffff);
3440 gfar_write(&regs->igaddr3, 0xffffffff);
3441 gfar_write(&regs->igaddr4, 0xffffffff);
3442 gfar_write(&regs->igaddr5, 0xffffffff);
3443 gfar_write(&regs->igaddr6, 0xffffffff);
3444 gfar_write(&regs->igaddr7, 0xffffffff);
3445 gfar_write(&regs->gaddr0, 0xffffffff);
3446 gfar_write(&regs->gaddr1, 0xffffffff);
3447 gfar_write(&regs->gaddr2, 0xffffffff);
3448 gfar_write(&regs->gaddr3, 0xffffffff);
3449 gfar_write(&regs->gaddr4, 0xffffffff);
3450 gfar_write(&regs->gaddr5, 0xffffffff);
3451 gfar_write(&regs->gaddr6, 0xffffffff);
3452 gfar_write(&regs->gaddr7, 0xffffffff);
3457 /* zero out the hash */
3458 gfar_write(&regs->igaddr0, 0x0);
3459 gfar_write(&regs->igaddr1, 0x0);
3460 gfar_write(&regs->igaddr2, 0x0);
3461 gfar_write(&regs->igaddr3, 0x0);
3462 gfar_write(&regs->igaddr4, 0x0);
3463 gfar_write(&regs->igaddr5, 0x0);
3464 gfar_write(&regs->igaddr6, 0x0);
3465 gfar_write(&regs->igaddr7, 0x0);
3466 gfar_write(&regs->gaddr0, 0x0);
3467 gfar_write(&regs->gaddr1, 0x0);
3468 gfar_write(&regs->gaddr2, 0x0);
3469 gfar_write(&regs->gaddr3, 0x0);
3470 gfar_write(&regs->gaddr4, 0x0);
3471 gfar_write(&regs->gaddr5, 0x0);
3472 gfar_write(&regs->gaddr6, 0x0);
3473 gfar_write(&regs->gaddr7, 0x0);
3475 /* If we have extended hash tables, we need to
3476 * clear the exact match registers to prepare for setting them
3479 if (priv->extended_hash) {
3480 em_num = GFAR_EM_NUM + 1;
3481 gfar_clear_exact_match(dev);
3488 if (netdev_mc_empty(dev))
3491 /* Parse the list, and set the appropriate bits */
3492 netdev_for_each_mc_addr(ha, dev) {
3494 gfar_set_mac_for_addr(dev, idx, ha->addr);
3497 gfar_set_hash_for_addr(dev, ha->addr);
3503 /* Clears each of the exact match registers to zero, so they
3504 * don't interfere with normal reception
3506 static void gfar_clear_exact_match(struct net_device *dev)
3509 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3511 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3512 gfar_set_mac_for_addr(dev, idx, zero_arr);
3515 /* Set the appropriate hash bit for the given addr */
3516 /* The algorithm works like so:
3517 * 1) Take the Destination Address (ie the multicast address), and
3518 * do a CRC on it (little endian), and reverse the bits of the
3520 * 2) Use the 8 most significant bits as a hash into a 256-entry
3521 * table. The table is controlled through 8 32-bit registers:
3522 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3523 * entry 255. This means that the 3 most significant bits in the
3524 * hash index indicate which gaddr register to use, and the 5 other bits
3525 * indicate which bit (assuming an IBM numbering scheme, which
3526 * for PowerPC (tm) is usually the case) in the register holds
3529 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3532 struct gfar_private *priv = netdev_priv(dev);
3533 u32 result = ether_crc(ETH_ALEN, addr);
3534 int width = priv->hash_width;
3535 u8 whichbit = (result >> (32 - width)) & 0x1f;
3536 u8 whichreg = result >> (32 - width + 5);
3537 u32 value = (1 << (31-whichbit));
3539 tempval = gfar_read(priv->hash_regs[whichreg]);
3541 gfar_write(priv->hash_regs[whichreg], tempval);
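/* Editorial worked example (hypothetical CRC value): with hash_width == 8
 * and a bit-reversed CRC whose top byte is 0b101_10110:
 *
 *	whichreg = result >> 29          = 0b101   (the 6th hash register)
 *	whichbit = (result >> 24) & 0x1f = 0b10110 = 22
 *	value    = 1 << (31 - 22)        = bit 22 in IBM numbering
 *
 * so bit 22 of hash register 5 is set for this address.
 */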
3545 /* There are multiple MAC Address register pairs on some controllers
3546 * This function sets the num'th pair to a given address
3548 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3551 struct gfar_private *priv = netdev_priv(dev);
3552 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3554 u32 __iomem *macptr = &regs->macstnaddr1;
3558 /* For a station address of 0x12345678ABCD in transmission
3559 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3560 * MACnADDR2 is set to 0x34120000.
3562 tempval = (addr[5] << 24) | (addr[4] << 16) |
3563 (addr[3] << 8) | addr[2];
3565 gfar_write(macptr, tempval);
3567 tempval = (addr[1] << 24) | (addr[0] << 16);
3569 gfar_write(macptr+1, tempval);
3572 /* GFAR error interrupt handler */
3573 static irqreturn_t gfar_error(int irq, void *grp_id)
3575 struct gfar_priv_grp *gfargrp = grp_id;
3576 struct gfar __iomem *regs = gfargrp->regs;
3577 struct gfar_private *priv = gfargrp->priv;
3578 struct net_device *dev = priv->ndev;
3580 /* Save ievent for future reference */
3581 u32 events = gfar_read(&regs->ievent);
3584 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3586 /* Magic Packet is not an error. */
3587 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3588 (events & IEVENT_MAG))
3589 events &= ~IEVENT_MAG;
3592 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3594 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3595 events, gfar_read(&regs->imask));
3597 /* Update the error counters */
3598 if (events & IEVENT_TXE) {
3599 dev->stats.tx_errors++;
3601 if (events & IEVENT_LC)
3602 dev->stats.tx_window_errors++;
3603 if (events & IEVENT_CRL)
3604 dev->stats.tx_aborted_errors++;
3605 if (events & IEVENT_XFUN) {
3606 netif_dbg(priv, tx_err, dev,
3607 "TX FIFO underrun, packet dropped\n");
3608 dev->stats.tx_dropped++;
3609 atomic64_inc(&priv->extra_stats.tx_underrun);
3611 schedule_work(&priv->reset_task);
3613 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3615 if (events & IEVENT_BSY) {
3616 dev->stats.rx_over_errors++;
3617 atomic64_inc(&priv->extra_stats.rx_bsy);
3619 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3620 gfar_read(&regs->rstat));
3622 if (events & IEVENT_BABR) {
3623 dev->stats.rx_errors++;
3624 atomic64_inc(&priv->extra_stats.rx_babr);
3626 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3628 if (events & IEVENT_EBERR) {
3629 atomic64_inc(&priv->extra_stats.eberr);
3630 netif_dbg(priv, rx_err, dev, "bus error\n");
3632 if (events & IEVENT_RXC)
3633 netif_dbg(priv, rx_status, dev, "control frame\n");
3635 if (events & IEVENT_BABT) {
3636 atomic64_inc(&priv->extra_stats.tx_babt);
3637 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3642 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3644 struct net_device *ndev = priv->ndev;
3645 struct phy_device *phydev = ndev->phydev;
3648 if (!phydev->duplex)
3651 if (!priv->pause_aneg_en) {
3652 if (priv->tx_pause_en)
3653 val |= MACCFG1_TX_FLOW;
3654 if (priv->rx_pause_en)
3655 val |= MACCFG1_RX_FLOW;
3657 u16 lcl_adv, rmt_adv;
3659 /* get link partner capabilities */
3662 rmt_adv = LPA_PAUSE_CAP;
3663 if (phydev->asym_pause)
3664 rmt_adv |= LPA_PAUSE_ASYM;
3666 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
3667 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3668 if (flowctrl & FLOW_CTRL_TX)
3669 val |= MACCFG1_TX_FLOW;
3670 if (flowctrl & FLOW_CTRL_RX)
3671 val |= MACCFG1_RX_FLOW;
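/* Editorial example (standard 802.3 pause resolution, not from the original
 * source): if both the local PHY and the link partner advertise symmetric
 * pause, mii_resolve_flowctrl_fdx() reports FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both MACCFG1 flow-control bits get set above.
 */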
3677 static noinline void gfar_update_link_state(struct gfar_private *priv)
3679 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3680 struct net_device *ndev = priv->ndev;
3681 struct phy_device *phydev = ndev->phydev;
3682 struct gfar_priv_rx_q *rx_queue = NULL;
3685 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3689 u32 tempval1 = gfar_read(&regs->maccfg1);
3690 u32 tempval = gfar_read(&regs->maccfg2);
3691 u32 ecntrl = gfar_read(&regs->ecntrl);
3692 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
3694 if (phydev->duplex != priv->oldduplex) {
3695 if (!(phydev->duplex))
3696 tempval &= ~(MACCFG2_FULL_DUPLEX);
3698 tempval |= MACCFG2_FULL_DUPLEX;
3700 priv->oldduplex = phydev->duplex;
3703 if (phydev->speed != priv->oldspeed) {
3704 switch (phydev->speed) {
3707 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3709 ecntrl &= ~(ECNTRL_R100);
3714 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3716 /* Reduced mode distinguishes
3717 * between 10 and 100
3719 if (phydev->speed == SPEED_100)
3720 ecntrl |= ECNTRL_R100;
3722 ecntrl &= ~(ECNTRL_R100);
3725 netif_warn(priv, link, priv->ndev,
3726 "Ack! Speed (%d) is not 10/100/1000!\n",
3731 priv->oldspeed = phydev->speed;
3734 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3735 tempval1 |= gfar_get_flowctrl_cfg(priv);
3737 /* Turn last free buffer recording on */
3738 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3739 for (i = 0; i < priv->num_rx_queues; i++) {
3742 rx_queue = priv->rx_queue[i];
3743 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3744 gfar_write(rx_queue->rfbptr, bdp_dma);
3747 priv->tx_actual_en = 1;
3750 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3751 priv->tx_actual_en = 0;
3753 gfar_write(&regs->maccfg1, tempval1);
3754 gfar_write(&regs->maccfg2, tempval);
3755 gfar_write(&regs->ecntrl, ecntrl);
3760 } else if (priv->oldlink) {
3763 priv->oldduplex = -1;
3766 if (netif_msg_link(priv))
3767 phy_print_status(phydev);
3770 static const struct of_device_id gfar_match[] =
3774 .compatible = "gianfar",
3777 .compatible = "fsl,etsec2",
3781 MODULE_DEVICE_TABLE(of, gfar_match);
3783 /* Structure for a device driver */
3784 static struct platform_driver gfar_driver = {
3786 .name = "fsl-gianfar",
3788 .of_match_table = gfar_match,
3790 .probe = gfar_probe,
3791 .remove = gfar_remove,
3794 module_platform_driver(gfar_driver);