1 /* drivers/net/ethernet/freescale/gianfar.c
3 * Gianfar Ethernet Driver
4 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c
9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * Copyright 2007 MontaVista Software, Inc.
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
20 * Gianfar: AKA Lambda Draconis, "Dragon"
28 * The driver is initialized through of_device. Configuration information
29 * is therefore conveyed through an OF-style device tree.
31 * The Gianfar Ethernet Controller uses a ring of buffer
32 * descriptors. The beginning is indicated by a register
33 * pointing to the physical address of the start of the ring.
34 * The end is determined by a "wrap" bit being set in the
35 * last descriptor of the ring.
37 * When a packet is received, the RXF bit in the
38 * IEVENT register is set, triggering an interrupt when the
39 * corresponding bit in the IMASK register is also set (if
40 * interrupt coalescing is active, then the interrupt may not
41 * happen immediately, but will wait until either a set number
42 * of frames or amount of time have passed). In NAPI, the
43 * interrupt handler will signal there is work to be done, and
44 * exit. This method will start at the last known empty
45 * descriptor, and process every subsequent descriptor until there
46 * are none left with data (NAPI will stop after a set number of
47 * packets to give time to other tasks, but will eventually
48 * process all the packets). The data arrives inside a
49 * pre-allocated skb, and so after the skb is passed up to the
50 * stack, a new skb must be allocated, and the address field in
51 * the buffer descriptor must be updated to indicate this new skb.
54 * When the kernel requests that a packet be transmitted, the
55 * driver starts where it left off last time, and points the
56 * descriptor at the buffer which was passed in. The driver
57 * then informs the DMA engine that there are packets ready to
58 * be transmitted. Once the controller is finished transmitting
59 * the packet, an interrupt may be triggered (under the same
60 * conditions as for reception, but depending on the TXF bit).
61 * The driver then cleans up the buffer.
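/* Illustrative sketch (not part of the driver) of the RX refill step
 * described above, using the gfar_new_skb()/gfar_new_rxbdp() helpers
 * defined later in this file:
 *
 *	skb = rx_queue->rx_skbuff[i];        pass this filled skb up the stack
 *	skb = gfar_new_skb(ndev);            allocate a replacement buffer
 *	rx_queue->rx_skbuff[i] = skb;
 *	gfar_new_rxbdp(rx_queue, bdp, skb);  point the descriptor at the new skb
 */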
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/delay.h>
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_vlan.h>
78 #include <linux/spinlock.h>
80 #include <linux/of_address.h>
81 #include <linux/of_irq.h>
82 #include <linux/of_mdio.h>
83 #include <linux/of_platform.h>
85 #include <linux/tcp.h>
86 #include <linux/udp.h>
88 #include <linux/net_tstamp.h>
92 #include <asm/mpc85xx.h>
94 #include <asm/uaccess.h>
95 #include <linux/module.h>
96 #include <linux/dma-mapping.h>
97 #include <linux/crc32.h>
98 #include <linux/mii.h>
99 #include <linux/phy.h>
100 #include <linux/phy_fixed.h>
101 #include <linux/of.h>
102 #include <linux/of_net.h>
106 #define TX_TIMEOUT (1*HZ)
108 const char gfar_driver_version[] = "1.3";
110 static int gfar_enet_open(struct net_device *dev);
111 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
112 static void gfar_reset_task(struct work_struct *work);
113 static void gfar_timeout(struct net_device *dev);
114 static int gfar_close(struct net_device *dev);
115 struct sk_buff *gfar_new_skb(struct net_device *dev);
116 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
117 struct sk_buff *skb);
118 static int gfar_set_mac_address(struct net_device *dev);
119 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
120 static irqreturn_t gfar_error(int irq, void *dev_id);
121 static irqreturn_t gfar_transmit(int irq, void *dev_id);
122 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123 static void adjust_link(struct net_device *dev);
124 static noinline void gfar_update_link_state(struct gfar_private *priv);
125 static int init_phy(struct net_device *dev);
126 static int gfar_probe(struct platform_device *ofdev);
127 static int gfar_remove(struct platform_device *ofdev);
128 static void free_skb_resources(struct gfar_private *priv);
129 static void gfar_set_multi(struct net_device *dev);
130 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
131 static void gfar_configure_serdes(struct net_device *dev);
132 static int gfar_poll_rx(struct napi_struct *napi, int budget);
133 static int gfar_poll_tx(struct napi_struct *napi, int budget);
134 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
135 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
136 #ifdef CONFIG_NET_POLL_CONTROLLER
137 static void gfar_netpoll(struct net_device *dev);
139 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
140 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
141 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
142 int amount_pull, struct napi_struct *napi);
143 static void gfar_halt_nodisable(struct gfar_private *priv);
144 static void gfar_clear_exact_match(struct net_device *dev);
145 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
147 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
149 MODULE_AUTHOR("Freescale Semiconductor, Inc");
150 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
151 MODULE_LICENSE("GPL");
153 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
160 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
161 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
162 lstatus |= BD_LFLAG(RXBD_WRAP);
166 bdp->lstatus = lstatus;
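/* Note: lstatus packs the BD's length and status into one 32-bit word;
 * BD_LFLAG() is assumed to shift the flag bits (RXBD_EMPTY, RXBD_WRAP)
 * into the status half, leaving the length bits untouched.
 */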
169 static int gfar_init_bds(struct net_device *ndev)
171 struct gfar_private *priv = netdev_priv(ndev);
172 struct gfar_priv_tx_q *tx_queue = NULL;
173 struct gfar_priv_rx_q *rx_queue = NULL;
178 for (i = 0; i < priv->num_tx_queues; i++) {
179 tx_queue = priv->tx_queue[i];
180 /* Initialize some variables in our dev structure */
181 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
182 tx_queue->dirty_tx = tx_queue->tx_bd_base;
183 tx_queue->cur_tx = tx_queue->tx_bd_base;
184 tx_queue->skb_curtx = 0;
185 tx_queue->skb_dirtytx = 0;
187 /* Initialize Transmit Descriptor Ring */
188 txbdp = tx_queue->tx_bd_base;
189 for (j = 0; j < tx_queue->tx_ring_size; j++) {
195 /* Set the last descriptor in the ring to indicate wrap */
197 txbdp->status |= TXBD_WRAP;
200 for (i = 0; i < priv->num_rx_queues; i++) {
201 rx_queue = priv->rx_queue[i];
202 rx_queue->cur_rx = rx_queue->rx_bd_base;
203 rx_queue->skb_currx = 0;
204 rxbdp = rx_queue->rx_bd_base;
206 for (j = 0; j < rx_queue->rx_ring_size; j++) {
207 struct sk_buff *skb = rx_queue->rx_skbuff[j];
210 gfar_init_rxbdp(rx_queue, rxbdp,
213 skb = gfar_new_skb(ndev);
215 netdev_err(ndev, "Can't allocate RX buffers\n");
218 rx_queue->rx_skbuff[j] = skb;
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
231 static int gfar_alloc_skb_resources(struct net_device *ndev)
236 struct gfar_private *priv = netdev_priv(ndev);
237 struct device *dev = priv->dev;
238 struct gfar_priv_tx_q *tx_queue = NULL;
239 struct gfar_priv_rx_q *rx_queue = NULL;
241 priv->total_tx_ring_size = 0;
242 for (i = 0; i < priv->num_tx_queues; i++)
243 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
245 priv->total_rx_ring_size = 0;
246 for (i = 0; i < priv->num_rx_queues; i++)
247 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
249 /* Allocate memory for the buffer descriptors */
250 vaddr = dma_alloc_coherent(dev,
251 (priv->total_tx_ring_size *
252 sizeof(struct txbd8)) +
253 (priv->total_rx_ring_size *
254 sizeof(struct rxbd8)),
259 for (i = 0; i < priv->num_tx_queues; i++) {
260 tx_queue = priv->tx_queue[i];
261 tx_queue->tx_bd_base = vaddr;
262 tx_queue->tx_bd_dma_base = addr;
263 tx_queue->dev = ndev;
264 /* enet DMA only understands physical addresses */
265 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
266 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
269 /* Start the rx descriptor ring where the tx ring leaves off */
270 for (i = 0; i < priv->num_rx_queues; i++) {
271 rx_queue = priv->rx_queue[i];
272 rx_queue->rx_bd_base = vaddr;
273 rx_queue->rx_bd_dma_base = addr;
274 rx_queue->dev = ndev;
275 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
276 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
279 /* Setup the skbuff rings */
280 for (i = 0; i < priv->num_tx_queues; i++) {
281 tx_queue = priv->tx_queue[i];
282 tx_queue->tx_skbuff =
283 kmalloc_array(tx_queue->tx_ring_size,
284 sizeof(*tx_queue->tx_skbuff),
286 if (!tx_queue->tx_skbuff)
289 for (k = 0; k < tx_queue->tx_ring_size; k++)
290 tx_queue->tx_skbuff[k] = NULL;
293 for (i = 0; i < priv->num_rx_queues; i++) {
294 rx_queue = priv->rx_queue[i];
295 rx_queue->rx_skbuff =
296 kmalloc_array(rx_queue->rx_ring_size,
297 sizeof(*rx_queue->rx_skbuff),
299 if (!rx_queue->rx_skbuff)
302 for (j = 0; j < rx_queue->rx_ring_size; j++)
303 rx_queue->rx_skbuff[j] = NULL;
306 if (gfar_init_bds(ndev))
312 free_skb_resources(priv);
316 static void gfar_init_tx_rx_base(struct gfar_private *priv)
318 struct gfar __iomem *regs = priv->gfargrp[0].regs;
322 baddr = &regs->tbase0;
323 for (i = 0; i < priv->num_tx_queues; i++) {
324 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
328 baddr = &regs->rbase0;
329 for (i = 0; i < priv->num_rx_queues; i++) {
330 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
335 static void gfar_rx_buff_size_config(struct gfar_private *priv)
337 int frame_size = priv->ndev->mtu + ETH_HLEN;
339 /* set this when rx hw offload (TOE) functions are being used */
340 priv->uses_rxfcb = 0;
342 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
343 priv->uses_rxfcb = 1;
345 if (priv->hwts_rx_en)
346 priv->uses_rxfcb = 1;
348 if (priv->uses_rxfcb)
349 frame_size += GMAC_FCB_LEN;
351 frame_size += priv->padding;
353 frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
354 INCREMENTAL_BUFFER_SIZE;
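/* Worked example, assuming INCREMENTAL_BUFFER_SIZE is 512: a 1500-byte
 * MTU gives frame_size = 1500 + 14 = 1514, and the rounding above turns
 * that into (1514 & ~511) + 512 = 1536 bytes per RX buffer.
 */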
356 priv->rx_buffer_size = frame_size;
359 static void gfar_mac_rx_config(struct gfar_private *priv)
361 struct gfar __iomem *regs = priv->gfargrp[0].regs;
364 if (priv->rx_filer_enable) {
365 rctrl |= RCTRL_FILREN;
366 /* Program the RIR0 reg with the required distribution */
367 if (priv->poll_mode == GFAR_SQ_POLLING)
368 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
369 else /* GFAR_MQ_POLLING */
370 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
373 /* Restore PROMISC mode */
374 if (priv->ndev->flags & IFF_PROMISC)
377 if (priv->ndev->features & NETIF_F_RXCSUM)
378 rctrl |= RCTRL_CHECKSUMMING;
380 if (priv->extended_hash)
381 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
384 rctrl &= ~RCTRL_PAL_MASK;
385 rctrl |= RCTRL_PADDING(priv->padding);
388 /* Enable HW time stamping if requested from user space */
389 if (priv->hwts_rx_en)
390 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
392 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
395 /* Init rctrl based on our settings */
396 gfar_write(&regs->rctrl, rctrl);
399 static void gfar_mac_tx_config(struct gfar_private *priv)
401 struct gfar __iomem *regs = priv->gfargrp[0].regs;
404 if (priv->ndev->features & NETIF_F_IP_CSUM)
405 tctrl |= TCTRL_INIT_CSUM;
407 if (priv->prio_sched_en)
408 tctrl |= TCTRL_TXSCHED_PRIO;
410 tctrl |= TCTRL_TXSCHED_WRRS;
411 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
412 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
415 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
416 tctrl |= TCTRL_VLINS;
418 gfar_write(&regs->tctrl, tctrl);
421 static void gfar_configure_coalescing(struct gfar_private *priv,
422 unsigned long tx_mask, unsigned long rx_mask)
424 struct gfar __iomem *regs = priv->gfargrp[0].regs;
427 if (priv->mode == MQ_MG_MODE) {
430 baddr = &regs->txic0;
431 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
432 gfar_write(baddr + i, 0);
433 if (likely(priv->tx_queue[i]->txcoalescing))
434 gfar_write(baddr + i, priv->tx_queue[i]->txic);
437 baddr = &regs->rxic0;
438 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
439 gfar_write(baddr + i, 0);
440 if (likely(priv->rx_queue[i]->rxcoalescing))
441 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
444 /* Backward compatible case -- even if we enable
445 * multiple queues, there's only a single reg to program
447 gfar_write(&regs->txic, 0);
448 if (likely(priv->tx_queue[0]->txcoalescing))
449 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
451 gfar_write(&regs->rxic, 0);
452 if (unlikely(priv->rx_queue[0]->rxcoalescing))
453 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
457 void gfar_configure_coalescing_all(struct gfar_private *priv)
459 gfar_configure_coalescing(priv, 0xFF, 0xFF);
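/* The all-ones masks select every possible queue (the bit maps are
 * 8 bits wide), so this re-applies the per-queue coalescing settings
 * across the board.
 */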
462 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
464 struct gfar_private *priv = netdev_priv(dev);
465 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
466 unsigned long tx_packets = 0, tx_bytes = 0;
469 for (i = 0; i < priv->num_rx_queues; i++) {
470 rx_packets += priv->rx_queue[i]->stats.rx_packets;
471 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
472 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
475 dev->stats.rx_packets = rx_packets;
476 dev->stats.rx_bytes = rx_bytes;
477 dev->stats.rx_dropped = rx_dropped;
479 for (i = 0; i < priv->num_tx_queues; i++) {
480 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
481 tx_packets += priv->tx_queue[i]->stats.tx_packets;
484 dev->stats.tx_bytes = tx_bytes;
485 dev->stats.tx_packets = tx_packets;
490 static const struct net_device_ops gfar_netdev_ops = {
491 .ndo_open = gfar_enet_open,
492 .ndo_start_xmit = gfar_start_xmit,
493 .ndo_stop = gfar_close,
494 .ndo_change_mtu = gfar_change_mtu,
495 .ndo_set_features = gfar_set_features,
496 .ndo_set_rx_mode = gfar_set_multi,
497 .ndo_tx_timeout = gfar_timeout,
498 .ndo_do_ioctl = gfar_ioctl,
499 .ndo_get_stats = gfar_get_stats,
500 .ndo_set_mac_address = eth_mac_addr,
501 .ndo_validate_addr = eth_validate_addr,
502 #ifdef CONFIG_NET_POLL_CONTROLLER
503 .ndo_poll_controller = gfar_netpoll,
507 static void gfar_ints_disable(struct gfar_private *priv)
510 for (i = 0; i < priv->num_grps; i++) {
511 struct gfar __iomem *regs = priv->gfargrp[i].regs;
513 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
515 /* Initialize IMASK */
516 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
520 static void gfar_ints_enable(struct gfar_private *priv)
523 for (i = 0; i < priv->num_grps; i++) {
524 struct gfar __iomem *regs = priv->gfargrp[i].regs;
525 /* Unmask the interrupts we look for */
526 gfar_write(&regs->imask, IMASK_DEFAULT);
530 void lock_tx_qs(struct gfar_private *priv)
534 for (i = 0; i < priv->num_tx_queues; i++)
535 spin_lock(&priv->tx_queue[i]->txlock);
538 void unlock_tx_qs(struct gfar_private *priv)
542 for (i = 0; i < priv->num_tx_queues; i++)
543 spin_unlock(&priv->tx_queue[i]->txlock);
546 static int gfar_alloc_tx_queues(struct gfar_private *priv)
550 for (i = 0; i < priv->num_tx_queues; i++) {
551 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
553 if (!priv->tx_queue[i])
556 priv->tx_queue[i]->tx_skbuff = NULL;
557 priv->tx_queue[i]->qindex = i;
558 priv->tx_queue[i]->dev = priv->ndev;
559 spin_lock_init(&(priv->tx_queue[i]->txlock));
564 static int gfar_alloc_rx_queues(struct gfar_private *priv)
568 for (i = 0; i < priv->num_rx_queues; i++) {
569 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
571 if (!priv->rx_queue[i])
574 priv->rx_queue[i]->rx_skbuff = NULL;
575 priv->rx_queue[i]->qindex = i;
576 priv->rx_queue[i]->dev = priv->ndev;
581 static void gfar_free_tx_queues(struct gfar_private *priv)
585 for (i = 0; i < priv->num_tx_queues; i++)
586 kfree(priv->tx_queue[i]);
589 static void gfar_free_rx_queues(struct gfar_private *priv)
593 for (i = 0; i < priv->num_rx_queues; i++)
594 kfree(priv->rx_queue[i]);
597 static void unmap_group_regs(struct gfar_private *priv)
601 for (i = 0; i < MAXGROUPS; i++)
602 if (priv->gfargrp[i].regs)
603 iounmap(priv->gfargrp[i].regs);
606 static void free_gfar_dev(struct gfar_private *priv)
610 for (i = 0; i < priv->num_grps; i++)
611 for (j = 0; j < GFAR_NUM_IRQS; j++) {
612 kfree(priv->gfargrp[i].irqinfo[j]);
613 priv->gfargrp[i].irqinfo[j] = NULL;
616 free_netdev(priv->ndev);
619 static void disable_napi(struct gfar_private *priv)
623 for (i = 0; i < priv->num_grps; i++) {
624 napi_disable(&priv->gfargrp[i].napi_rx);
625 napi_disable(&priv->gfargrp[i].napi_tx);
629 static void enable_napi(struct gfar_private *priv)
633 for (i = 0; i < priv->num_grps; i++) {
634 napi_enable(&priv->gfargrp[i].napi_rx);
635 napi_enable(&priv->gfargrp[i].napi_tx);
639 static int gfar_parse_group(struct device_node *np,
640 struct gfar_private *priv, const char *model)
642 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
645 for (i = 0; i < GFAR_NUM_IRQS; i++) {
646 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
648 if (!grp->irqinfo[i])
652 grp->regs = of_iomap(np, 0);
656 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
658 /* If we aren't the FEC we have multiple interrupts */
659 if (model && strcasecmp(model, "FEC")) {
660 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
661 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
662 if (gfar_irq(grp, TX)->irq == NO_IRQ ||
663 gfar_irq(grp, RX)->irq == NO_IRQ ||
664 gfar_irq(grp, ER)->irq == NO_IRQ)
669 spin_lock_init(&grp->grplock);
670 if (priv->mode == MQ_MG_MODE) {
671 u32 *rxq_mask, *txq_mask;
672 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
673 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
675 if (priv->poll_mode == GFAR_SQ_POLLING) {
676 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
677 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
678 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
679 } else { /* GFAR_MQ_POLLING */
680 grp->rx_bit_map = rxq_mask ?
681 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
682 grp->tx_bit_map = txq_mask ?
683 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
686 grp->rx_bit_map = 0xFF;
687 grp->tx_bit_map = 0xFF;
690 /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
691 * right to left, so we need to reverse the 8 bits to get the q index
693 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
694 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
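/* For example, a device-tree mask of 0x80 (MSB = q0) becomes 0x01 after
 * bitrev8(), so for_each_set_bit() visits queue 0 at bit position 0.
 */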
696 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
697 * also assign queues to groups
699 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
701 grp->rx_queue = priv->rx_queue[i];
702 grp->num_rx_queues++;
703 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
704 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
705 priv->rx_queue[i]->grp = grp;
708 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
710 grp->tx_queue = priv->tx_queue[i];
711 grp->num_tx_queues++;
712 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
713 priv->tqueue |= (TQUEUE_EN0 >> i);
714 priv->tx_queue[i]->grp = grp;
722 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
726 const void *mac_addr;
728 struct net_device *dev = NULL;
729 struct gfar_private *priv = NULL;
730 struct device_node *np = ofdev->dev.of_node;
731 struct device_node *child = NULL;
733 const u32 *stash_len;
734 const u32 *stash_idx;
735 unsigned int num_tx_qs, num_rx_qs;
736 u32 *tx_queues, *rx_queues;
737 unsigned short mode, poll_mode;
739 if (!np || !of_device_is_available(np))
742 if (of_device_is_compatible(np, "fsl,etsec2")) {
744 poll_mode = GFAR_SQ_POLLING;
747 poll_mode = GFAR_SQ_POLLING;
750 /* parse the num of HW tx and rx queues */
751 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
752 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
754 if (mode == SQ_SG_MODE) {
757 } else { /* MQ_MG_MODE */
758 /* get the actual number of supported groups */
759 unsigned int num_grps = of_get_available_child_count(np);
761 if (num_grps == 0 || num_grps > MAXGROUPS) {
762 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
764 pr_err("Cannot do alloc_etherdev, aborting\n");
768 if (poll_mode == GFAR_SQ_POLLING) {
769 num_tx_qs = num_grps; /* one txq per int group */
770 num_rx_qs = num_grps; /* one rxq per int group */
771 } else { /* GFAR_MQ_POLLING */
772 num_tx_qs = tx_queues ? *tx_queues : 1;
773 num_rx_qs = rx_queues ? *rx_queues : 1;
777 if (num_tx_qs > MAX_TX_QS) {
778 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
779 num_tx_qs, MAX_TX_QS);
780 pr_err("Cannot do alloc_etherdev, aborting\n");
784 if (num_rx_qs > MAX_RX_QS) {
785 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
786 num_rx_qs, MAX_RX_QS);
787 pr_err("Cannot do alloc_etherdev, aborting\n");
791 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
796 priv = netdev_priv(dev);
800 priv->poll_mode = poll_mode;
802 priv->num_tx_queues = num_tx_qs;
803 netif_set_real_num_rx_queues(dev, num_rx_qs);
804 priv->num_rx_queues = num_rx_qs;
806 err = gfar_alloc_tx_queues(priv);
808 goto tx_alloc_failed;
810 err = gfar_alloc_rx_queues(priv);
812 goto rx_alloc_failed;
814 /* Init Rx queue filer rule set linked list */
815 INIT_LIST_HEAD(&priv->rx_list.list);
816 priv->rx_list.count = 0;
817 mutex_init(&priv->rx_queue_access);
819 model = of_get_property(np, "model", NULL);
821 for (i = 0; i < MAXGROUPS; i++)
822 priv->gfargrp[i].regs = NULL;
824 /* Parse and initialize group specific information */
825 if (priv->mode == MQ_MG_MODE) {
826 for_each_child_of_node(np, child) {
827 err = gfar_parse_group(child, priv, model);
831 } else { /* SQ_SG_MODE */
832 err = gfar_parse_group(np, priv, model);
837 stash = of_get_property(np, "bd-stash", NULL);
840 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
841 priv->bd_stash_en = 1;
844 stash_len = of_get_property(np, "rx-stash-len", NULL);
847 priv->rx_stash_size = *stash_len;
849 stash_idx = of_get_property(np, "rx-stash-idx", NULL);
852 priv->rx_stash_index = *stash_idx;
854 if (stash_len || stash_idx)
855 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
857 mac_addr = of_get_mac_address(np);
860 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
862 if (model && !strcasecmp(model, "TSEC"))
863 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
864 FSL_GIANFAR_DEV_HAS_COALESCE |
865 FSL_GIANFAR_DEV_HAS_RMON |
866 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
868 if (model && !strcasecmp(model, "eTSEC"))
869 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
870 FSL_GIANFAR_DEV_HAS_COALESCE |
871 FSL_GIANFAR_DEV_HAS_RMON |
872 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
873 FSL_GIANFAR_DEV_HAS_CSUM |
874 FSL_GIANFAR_DEV_HAS_VLAN |
875 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
876 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
877 FSL_GIANFAR_DEV_HAS_TIMER;
879 ctype = of_get_property(np, "phy-connection-type", NULL);
881 /* We only care about rgmii-id. The rest are autodetected */
882 if (ctype && !strcmp(ctype, "rgmii-id"))
883 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
885 priv->interface = PHY_INTERFACE_MODE_MII;
887 if (of_get_property(np, "fsl,magic-packet", NULL))
888 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
892 /* In the case of a fixed PHY, the DT node associated
893 * with the PHY is the Ethernet MAC DT node.
895 if (of_phy_is_fixed_link(np)) {
896 err = of_phy_register_fixed_link(np);
903 /* Find the TBI PHY. If it's not there, we don't support SGMII */
904 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
909 unmap_group_regs(priv);
911 gfar_free_rx_queues(priv);
913 gfar_free_tx_queues(priv);
918 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
920 struct hwtstamp_config config;
921 struct gfar_private *priv = netdev_priv(netdev);
923 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
926 /* reserved for future extensions */
930 switch (config.tx_type) {
931 case HWTSTAMP_TX_OFF:
932 priv->hwts_tx_en = 0;
935 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
937 priv->hwts_tx_en = 1;
943 switch (config.rx_filter) {
944 case HWTSTAMP_FILTER_NONE:
945 if (priv->hwts_rx_en) {
946 priv->hwts_rx_en = 0;
951 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
953 if (!priv->hwts_rx_en) {
954 priv->hwts_rx_en = 1;
957 config.rx_filter = HWTSTAMP_FILTER_ALL;
961 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
965 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
967 struct hwtstamp_config config;
968 struct gfar_private *priv = netdev_priv(netdev);
971 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
972 config.rx_filter = (priv->hwts_rx_en ?
973 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
975 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
979 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
981 struct gfar_private *priv = netdev_priv(dev);
983 if (!netif_running(dev))
986 if (cmd == SIOCSHWTSTAMP)
987 return gfar_hwtstamp_set(dev, rq);
988 if (cmd == SIOCGHWTSTAMP)
989 return gfar_hwtstamp_get(dev, rq);
994 return phy_mii_ioctl(priv->phydev, rq, cmd);
997 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1000 u32 rqfpr = FPR_FILER_MASK;
1004 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1005 priv->ftp_rqfpr[rqfar] = rqfpr;
1006 priv->ftp_rqfcr[rqfar] = rqfcr;
1007 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1010 rqfcr = RQFCR_CMP_NOMATCH;
1011 priv->ftp_rqfpr[rqfar] = rqfpr;
1012 priv->ftp_rqfcr[rqfar] = rqfcr;
1013 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1016 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1018 priv->ftp_rqfcr[rqfar] = rqfcr;
1019 priv->ftp_rqfpr[rqfar] = rqfpr;
1020 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1023 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1025 priv->ftp_rqfcr[rqfar] = rqfcr;
1026 priv->ftp_rqfpr[rqfar] = rqfpr;
1027 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1032 static void gfar_init_filer_table(struct gfar_private *priv)
1035 u32 rqfar = MAX_FILER_IDX;
1037 u32 rqfpr = FPR_FILER_MASK;
1040 rqfcr = RQFCR_CMP_MATCH;
1041 priv->ftp_rqfcr[rqfar] = rqfcr;
1042 priv->ftp_rqfpr[rqfar] = rqfpr;
1043 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1045 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1046 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1047 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1048 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1049 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1050 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1052 /* cur_filer_idx indicates the first non-masked rule */
1053 priv->cur_filer_idx = rqfar;
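/* The table now holds a catch-all match at MAX_FILER_IDX with the
 * per-class cluster entries just below it; the loop below fills
 * everything from index 0 up to cur_filer_idx with no-match rules.
 */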
1055 /* Rest are masked rules */
1056 rqfcr = RQFCR_CMP_NOMATCH;
1057 for (i = 0; i < rqfar; i++) {
1058 priv->ftp_rqfcr[i] = rqfcr;
1059 priv->ftp_rqfpr[i] = rqfpr;
1060 gfar_write_filer(priv, i, rqfcr, rqfpr);
1064 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1066 unsigned int pvr = mfspr(SPRN_PVR);
1067 unsigned int svr = mfspr(SPRN_SVR);
1068 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1069 unsigned int rev = svr & 0xffff;
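/* For instance, a (hypothetical) SVR of 0x80b00020 yields mod = 0x80b0
 * and rev = 0x0020, which the checks below treat as MPC8313 rev 2.0.
 */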
1071 /* MPC8313 Rev 2.0 and higher; All MPC837x */
1072 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1073 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1074 priv->errata |= GFAR_ERRATA_74;
1076 /* MPC8313 and MPC837x all rev */
1077 if ((pvr == 0x80850010 && mod == 0x80b0) ||
1078 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1079 priv->errata |= GFAR_ERRATA_76;
1081 /* MPC8313 Rev < 2.0 */
1082 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1083 priv->errata |= GFAR_ERRATA_12;
1086 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1088 unsigned int svr = mfspr(SPRN_SVR);
1090 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1091 priv->errata |= GFAR_ERRATA_12;
1092 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1093 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1094 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1097 static void gfar_detect_errata(struct gfar_private *priv)
1099 struct device *dev = &priv->ofdev->dev;
1101 /* no plans to fix */
1102 priv->errata |= GFAR_ERRATA_A002;
1104 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1105 __gfar_detect_errata_85xx(priv);
1106 else /* non-mpc85xx parts, i.e. e300 core based */
1107 __gfar_detect_errata_83xx(priv);
1110 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1114 void gfar_mac_reset(struct gfar_private *priv)
1116 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1119 /* Reset MAC layer */
1120 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1122 /* We need to delay at least 3 TX clocks */
1125 /* the soft reset bit is not self-resetting, so we need to
1126 * clear it before resuming normal operation
1128 gfar_write(&regs->maccfg1, 0);
1132 /* Compute rx_buff_size based on config flags */
1133 gfar_rx_buff_size_config(priv);
1135 /* Initialize the max receive frame/buffer lengths */
1136 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1137 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1139 /* Initialize the Minimum Frame Length Register */
1140 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1142 /* Initialize MACCFG2. */
1143 tempval = MACCFG2_INIT_SETTINGS;
1145 /* If the mtu is larger than the max size for standard
1146 * ethernet frames (ie, a jumbo frame), then set maccfg2
1147 * to allow huge frames, and to check the length
1149 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1150 gfar_has_errata(priv, GFAR_ERRATA_74))
1151 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1153 gfar_write(&regs->maccfg2, tempval);
1155 /* Clear mac addr hash registers */
1156 gfar_write(&regs->igaddr0, 0);
1157 gfar_write(&regs->igaddr1, 0);
1158 gfar_write(&regs->igaddr2, 0);
1159 gfar_write(&regs->igaddr3, 0);
1160 gfar_write(&regs->igaddr4, 0);
1161 gfar_write(&regs->igaddr5, 0);
1162 gfar_write(&regs->igaddr6, 0);
1163 gfar_write(&regs->igaddr7, 0);
1165 gfar_write(&regs->gaddr0, 0);
1166 gfar_write(&regs->gaddr1, 0);
1167 gfar_write(&regs->gaddr2, 0);
1168 gfar_write(&regs->gaddr3, 0);
1169 gfar_write(&regs->gaddr4, 0);
1170 gfar_write(&regs->gaddr5, 0);
1171 gfar_write(&regs->gaddr6, 0);
1172 gfar_write(&regs->gaddr7, 0);
1174 if (priv->extended_hash)
1175 gfar_clear_exact_match(priv->ndev);
1177 gfar_mac_rx_config(priv);
1179 gfar_mac_tx_config(priv);
1181 gfar_set_mac_address(priv->ndev);
1183 gfar_set_multi(priv->ndev);
1185 /* clear ievent and imask before configuring coalescing */
1186 gfar_ints_disable(priv);
1188 /* Configure the coalescing support */
1189 gfar_configure_coalescing_all(priv);
1192 static void gfar_hw_init(struct gfar_private *priv)
1194 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1197 /* Stop the DMA engine now, in case it was running before
1198 * (The firmware could have used it, and left it running).
1202 gfar_mac_reset(priv);
1204 /* Zero out the rmon mib registers if it has them */
1205 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1206 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1208 /* Mask off the CAM interrupts */
1209 gfar_write(&regs->rmon.cam1, 0xffffffff);
1210 gfar_write(&regs->rmon.cam2, 0xffffffff);
1213 /* Initialize ECNTRL */
1214 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1216 /* Set the extraction length and index */
1217 attrs = ATTRELI_EL(priv->rx_stash_size) |
1218 ATTRELI_EI(priv->rx_stash_index);
1220 gfar_write(&regs->attreli, attrs);
1222 /* Start with defaults, and add stashing
1223 * depending on driver parameters
1225 attrs = ATTR_INIT_SETTINGS;
1227 if (priv->bd_stash_en)
1228 attrs |= ATTR_BDSTASH;
1230 if (priv->rx_stash_size != 0)
1231 attrs |= ATTR_BUFSTASH;
1233 gfar_write(&regs->attr, attrs);
1236 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1237 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1238 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1240 /* Program the interrupt steering regs, only for MG devices */
1241 if (priv->num_grps > 1)
1242 gfar_write_isrg(priv);
1245 static void gfar_init_addr_hash_table(struct gfar_private *priv)
1247 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1249 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1250 priv->extended_hash = 1;
1251 priv->hash_width = 9;
1253 priv->hash_regs[0] = &regs->igaddr0;
1254 priv->hash_regs[1] = &regs->igaddr1;
1255 priv->hash_regs[2] = &regs->igaddr2;
1256 priv->hash_regs[3] = &regs->igaddr3;
1257 priv->hash_regs[4] = &regs->igaddr4;
1258 priv->hash_regs[5] = &regs->igaddr5;
1259 priv->hash_regs[6] = &regs->igaddr6;
1260 priv->hash_regs[7] = &regs->igaddr7;
1261 priv->hash_regs[8] = &regs->gaddr0;
1262 priv->hash_regs[9] = &regs->gaddr1;
1263 priv->hash_regs[10] = &regs->gaddr2;
1264 priv->hash_regs[11] = &regs->gaddr3;
1265 priv->hash_regs[12] = &regs->gaddr4;
1266 priv->hash_regs[13] = &regs->gaddr5;
1267 priv->hash_regs[14] = &regs->gaddr6;
1268 priv->hash_regs[15] = &regs->gaddr7;
1271 priv->extended_hash = 0;
1272 priv->hash_width = 8;
1274 priv->hash_regs[0] = &regs->gaddr0;
1275 priv->hash_regs[1] = &regs->gaddr1;
1276 priv->hash_regs[2] = &regs->gaddr2;
1277 priv->hash_regs[3] = &regs->gaddr3;
1278 priv->hash_regs[4] = &regs->gaddr4;
1279 priv->hash_regs[5] = &regs->gaddr5;
1280 priv->hash_regs[6] = &regs->gaddr6;
1281 priv->hash_regs[7] = &regs->gaddr7;
1285 /* Set up the ethernet device structure, private data,
1286 * and anything else we need before we start
1288 static int gfar_probe(struct platform_device *ofdev)
1290 struct net_device *dev = NULL;
1291 struct gfar_private *priv = NULL;
1294 err = gfar_of_init(ofdev, &dev);
1299 priv = netdev_priv(dev);
1301 priv->ofdev = ofdev;
1302 priv->dev = &ofdev->dev;
1303 SET_NETDEV_DEV(dev, &ofdev->dev);
1305 spin_lock_init(&priv->bflock);
1306 INIT_WORK(&priv->reset_task, gfar_reset_task);
1308 platform_set_drvdata(ofdev, priv);
1310 gfar_detect_errata(priv);
1312 /* Set the dev->base_addr to the gfar reg region */
1313 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1315 /* Fill in the dev structure */
1316 dev->watchdog_timeo = TX_TIMEOUT;
1318 dev->netdev_ops = &gfar_netdev_ops;
1319 dev->ethtool_ops = &gfar_ethtool_ops;
1321 /* Register for napi ...We are registering NAPI for each grp */
1322 for (i = 0; i < priv->num_grps; i++) {
1323 if (priv->poll_mode == GFAR_SQ_POLLING) {
1324 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1325 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1326 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1327 gfar_poll_tx_sq, 2);
1329 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1330 gfar_poll_rx, GFAR_DEV_WEIGHT);
1331 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1336 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1337 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1339 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1340 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1343 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1344 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1345 NETIF_F_HW_VLAN_CTAG_RX;
1346 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1349 gfar_init_addr_hash_table(priv);
1351 /* Insert receive time stamps into padding alignment bytes */
1352 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1355 if (dev->features & NETIF_F_IP_CSUM ||
1356 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1357 dev->needed_headroom = GMAC_FCB_LEN;
1359 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1361 /* Initializing some of the rx/tx queue level parameters */
1362 for (i = 0; i < priv->num_tx_queues; i++) {
1363 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1364 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1365 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1366 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1369 for (i = 0; i < priv->num_rx_queues; i++) {
1370 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1371 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1372 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1375 /* always enable rx filer */
1376 priv->rx_filer_enable = 1;
1377 /* Enable most messages by default */
1378 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1379 /* use priority h/w tx queue scheduling for single queue devices */
1380 if (priv->num_tx_queues == 1)
1381 priv->prio_sched_en = 1;
1383 set_bit(GFAR_DOWN, &priv->state);
1387 /* Carrier starts down, phylib will bring it up */
1388 netif_carrier_off(dev);
1390 err = register_netdev(dev);
1393 pr_err("%s: Cannot register net device, aborting\n", dev->name);
1397 device_init_wakeup(&dev->dev,
1398 priv->device_flags &
1399 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1401 /* fill out IRQ number and name fields */
1402 for (i = 0; i < priv->num_grps; i++) {
1403 struct gfar_priv_grp *grp = &priv->gfargrp[i];
1404 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1405 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1406 dev->name, "_g", '0' + i, "_tx");
1407 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1408 dev->name, "_g", '0' + i, "_rx");
1409 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1410 dev->name, "_g", '0' + i, "_er");
1412 strcpy(gfar_irq(grp, TX)->name, dev->name);
1415 /* Initialize the filer table */
1416 gfar_init_filer_table(priv);
1418 /* Print out the device info */
1419 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1421 /* Even more device info helps when determining which kernel
1422 * provided which set of benchmarks.
1424 netdev_info(dev, "Running with NAPI enabled\n");
1425 for (i = 0; i < priv->num_rx_queues; i++)
1426 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1427 i, priv->rx_queue[i]->rx_ring_size);
1428 for (i = 0; i < priv->num_tx_queues; i++)
1429 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1430 i, priv->tx_queue[i]->tx_ring_size);
1435 unmap_group_regs(priv);
1436 gfar_free_rx_queues(priv);
1437 gfar_free_tx_queues(priv);
1439 of_node_put(priv->phy_node);
1441 of_node_put(priv->tbi_node);
1442 free_gfar_dev(priv);
1446 static int gfar_remove(struct platform_device *ofdev)
1448 struct gfar_private *priv = platform_get_drvdata(ofdev);
1451 of_node_put(priv->phy_node);
1453 of_node_put(priv->tbi_node);
1455 unregister_netdev(priv->ndev);
1456 unmap_group_regs(priv);
1457 gfar_free_rx_queues(priv);
1458 gfar_free_tx_queues(priv);
1459 free_gfar_dev(priv);
1466 static int gfar_suspend(struct device *dev)
1468 struct gfar_private *priv = dev_get_drvdata(dev);
1469 struct net_device *ndev = priv->ndev;
1470 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1471 unsigned long flags;
1474 int magic_packet = priv->wol_en &&
1475 (priv->device_flags &
1476 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1478 netif_device_detach(ndev);
1480 if (netif_running(ndev)) {
1482 local_irq_save(flags);
1485 gfar_halt_nodisable(priv);
1487 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1488 tempval = gfar_read(&regs->maccfg1);
1490 tempval &= ~MACCFG1_TX_EN;
1493 tempval &= ~MACCFG1_RX_EN;
1495 gfar_write(&regs->maccfg1, tempval);
1498 local_irq_restore(flags);
1503 /* Enable interrupt on Magic Packet */
1504 gfar_write(&regs->imask, IMASK_MAG);
1506 /* Enable Magic Packet mode */
1507 tempval = gfar_read(&regs->maccfg2);
1508 tempval |= MACCFG2_MPEN;
1509 gfar_write(&regs->maccfg2, tempval);
1511 phy_stop(priv->phydev);
1518 static int gfar_resume(struct device *dev)
1520 struct gfar_private *priv = dev_get_drvdata(dev);
1521 struct net_device *ndev = priv->ndev;
1522 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1523 unsigned long flags;
1525 int magic_packet = priv->wol_en &&
1526 (priv->device_flags &
1527 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1529 if (!netif_running(ndev)) {
1530 netif_device_attach(ndev);
1534 if (!magic_packet && priv->phydev)
1535 phy_start(priv->phydev);
1537 /* Disable Magic Packet mode, in case something
1540 local_irq_save(flags);
1543 tempval = gfar_read(&regs->maccfg2);
1544 tempval &= ~MACCFG2_MPEN;
1545 gfar_write(&regs->maccfg2, tempval);
1550 local_irq_restore(flags);
1552 netif_device_attach(ndev);
1559 static int gfar_restore(struct device *dev)
1561 struct gfar_private *priv = dev_get_drvdata(dev);
1562 struct net_device *ndev = priv->ndev;
1564 if (!netif_running(ndev)) {
1565 netif_device_attach(ndev);
1570 if (gfar_init_bds(ndev)) {
1571 free_skb_resources(priv);
1575 gfar_mac_reset(priv);
1577 gfar_init_tx_rx_base(priv);
1583 priv->oldduplex = -1;
1586 phy_start(priv->phydev);
1588 netif_device_attach(ndev);
1594 static struct dev_pm_ops gfar_pm_ops = {
1595 .suspend = gfar_suspend,
1596 .resume = gfar_resume,
1597 .freeze = gfar_suspend,
1598 .thaw = gfar_resume,
1599 .restore = gfar_restore,
1602 #define GFAR_PM_OPS (&gfar_pm_ops)
1606 #define GFAR_PM_OPS NULL
1610 /* Reads the controller's registers to determine what interface
1611 * connects it to the PHY.
1613 static phy_interface_t gfar_get_interface(struct net_device *dev)
1615 struct gfar_private *priv = netdev_priv(dev);
1616 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1619 ecntrl = gfar_read(&regs->ecntrl);
1621 if (ecntrl & ECNTRL_SGMII_MODE)
1622 return PHY_INTERFACE_MODE_SGMII;
1624 if (ecntrl & ECNTRL_TBI_MODE) {
1625 if (ecntrl & ECNTRL_REDUCED_MODE)
1626 return PHY_INTERFACE_MODE_RTBI;
1628 return PHY_INTERFACE_MODE_TBI;
1631 if (ecntrl & ECNTRL_REDUCED_MODE) {
1632 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1633 return PHY_INTERFACE_MODE_RMII;
1636 phy_interface_t interface = priv->interface;
1638 /* This isn't autodetected right now, so it must
1639 * be set by the device tree or platform code.
1641 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1642 return PHY_INTERFACE_MODE_RGMII_ID;
1644 return PHY_INTERFACE_MODE_RGMII;
1648 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1649 return PHY_INTERFACE_MODE_GMII;
1651 return PHY_INTERFACE_MODE_MII;
1655 /* Initializes driver's PHY state, and attaches to the PHY.
1656 * Returns 0 on success.
1658 static int init_phy(struct net_device *dev)
1660 struct gfar_private *priv = netdev_priv(dev);
1661 uint gigabit_support =
1662 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1663 GFAR_SUPPORTED_GBIT : 0;
1664 phy_interface_t interface;
1668 priv->oldduplex = -1;
1670 interface = gfar_get_interface(dev);
1672 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1674 if (!priv->phydev) {
1675 dev_err(&dev->dev, "could not attach to PHY\n");
1679 if (interface == PHY_INTERFACE_MODE_SGMII)
1680 gfar_configure_serdes(dev);
1682 /* Remove any features not supported by the controller */
1683 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1684 priv->phydev->advertising = priv->phydev->supported;
1689 /* Initialize TBI PHY interface for communicating with the
1690 * SERDES lynx PHY on the chip. We communicate with this PHY
1691 * through the MDIO bus on each controller, treating it as a
1692 * "normal" PHY at the address found in the TBIPA register. We assume
1693 * that the TBIPA register is valid. Either the MDIO bus code will set
1694 * it to a value that doesn't conflict with other PHYs on the bus, or the
1695 * value doesn't matter, as there are no other PHYs on the bus.
1697 static void gfar_configure_serdes(struct net_device *dev)
1699 struct gfar_private *priv = netdev_priv(dev);
1700 struct phy_device *tbiphy;
1702 if (!priv->tbi_node) {
1703 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1704 "device tree specify a tbi-handle\n");
1708 tbiphy = of_phy_find_device(priv->tbi_node);
1710 dev_err(&dev->dev, "error: Could not get TBI device\n");
1714 /* If the link is already up, we must already be ok, and don't need to
1715 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1716 * everything for us? Resetting it takes the link down and requires
1717 * several seconds for it to come back.
1719 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1722 /* Single clk mode, mii mode off (for serdes communication) */
1723 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1725 phy_write(tbiphy, MII_ADVERTISE,
1726 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1727 ADVERTISE_1000XPSE_ASYM);
1729 phy_write(tbiphy, MII_BMCR,
1730 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1734 static int __gfar_is_rx_idle(struct gfar_private *priv)
1738 /* Normally TSEC should not hang on GRS commands, so we should
1739 * actually wait for IEVENT_GRSC flag.
1741 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1744 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1745 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1746 * and the Rx can be safely reset.
1748 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1750 if ((res & 0xffff) == (res >> 16))
1756 /* Halt the receive and transmit queues */
1757 static void gfar_halt_nodisable(struct gfar_private *priv)
1759 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1762 gfar_ints_disable(priv);
1764 /* Stop the DMA, and wait for it to stop */
1765 tempval = gfar_read(&regs->dmactrl);
1766 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1767 (DMACTRL_GRS | DMACTRL_GTS)) {
1770 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1771 gfar_write(&regs->dmactrl, tempval);
1774 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1775 (IEVENT_GRSC | IEVENT_GTSC)) ==
1776 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1777 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1778 ret = __gfar_is_rx_idle(priv);
1783 /* Halt the receive and transmit queues */
1784 void gfar_halt(struct gfar_private *priv)
1786 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1789 /* Disable the Rx/Tx hw queues */
1790 gfar_write(&regs->rqueue, 0);
1791 gfar_write(&regs->tqueue, 0);
1795 gfar_halt_nodisable(priv);
1797 /* Disable Rx/Tx DMA */
1798 tempval = gfar_read(&regs->maccfg1);
1799 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1800 gfar_write(&regs->maccfg1, tempval);
1803 void stop_gfar(struct net_device *dev)
1805 struct gfar_private *priv = netdev_priv(dev);
1807 netif_tx_stop_all_queues(dev);
1809 smp_mb__before_clear_bit();
1810 set_bit(GFAR_DOWN, &priv->state);
1811 smp_mb__after_clear_bit();
1815 /* disable ints and gracefully shut down Rx/Tx DMA */
1818 phy_stop(priv->phydev);
1820 free_skb_resources(priv);
1823 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1825 struct txbd8 *txbdp;
1826 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1829 txbdp = tx_queue->tx_bd_base;
1831 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1832 if (!tx_queue->tx_skbuff[i])
1835 dma_unmap_single(priv->dev, txbdp->bufPtr,
1836 txbdp->length, DMA_TO_DEVICE);
1838 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1841 dma_unmap_page(priv->dev, txbdp->bufPtr,
1842 txbdp->length, DMA_TO_DEVICE);
1845 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1846 tx_queue->tx_skbuff[i] = NULL;
1848 kfree(tx_queue->tx_skbuff);
1849 tx_queue->tx_skbuff = NULL;
1852 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1854 struct rxbd8 *rxbdp;
1855 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1858 rxbdp = rx_queue->rx_bd_base;
1860 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1861 if (rx_queue->rx_skbuff[i]) {
1862 dma_unmap_single(priv->dev, rxbdp->bufPtr,
1863 priv->rx_buffer_size,
1865 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1866 rx_queue->rx_skbuff[i] = NULL;
1872 kfree(rx_queue->rx_skbuff);
1873 rx_queue->rx_skbuff = NULL;
1876 /* If there are any tx skbs or rx skbs still around, free them.
1877 * Then free tx_skbuff and rx_skbuff
1879 static void free_skb_resources(struct gfar_private *priv)
1881 struct gfar_priv_tx_q *tx_queue = NULL;
1882 struct gfar_priv_rx_q *rx_queue = NULL;
1885 /* Go through all the buffer descriptors and free their data buffers */
1886 for (i = 0; i < priv->num_tx_queues; i++) {
1887 struct netdev_queue *txq;
1889 tx_queue = priv->tx_queue[i];
1890 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1891 if (tx_queue->tx_skbuff)
1892 free_skb_tx_queue(tx_queue);
1893 netdev_tx_reset_queue(txq);
1896 for (i = 0; i < priv->num_rx_queues; i++) {
1897 rx_queue = priv->rx_queue[i];
1898 if (rx_queue->rx_skbuff)
1899 free_skb_rx_queue(rx_queue);
1902 dma_free_coherent(priv->dev,
1903 sizeof(struct txbd8) * priv->total_tx_ring_size +
1904 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1905 priv->tx_queue[0]->tx_bd_base,
1906 priv->tx_queue[0]->tx_bd_dma_base);
1909 void gfar_start(struct gfar_private *priv)
1911 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1915 /* Enable Rx/Tx hw queues */
1916 gfar_write(&regs->rqueue, priv->rqueue);
1917 gfar_write(&regs->tqueue, priv->tqueue);
1919 /* Initialize DMACTRL to have WWR and WOP */
1920 tempval = gfar_read(&regs->dmactrl);
1921 tempval |= DMACTRL_INIT_SETTINGS;
1922 gfar_write(&regs->dmactrl, tempval);
1924 /* Make sure we aren't stopped */
1925 tempval = gfar_read(&regs->dmactrl);
1926 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1927 gfar_write(&regs->dmactrl, tempval);
1929 for (i = 0; i < priv->num_grps; i++) {
1930 regs = priv->gfargrp[i].regs;
1931 /* Clear THLT/RHLT, so that the DMA starts polling now */
1932 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1933 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1936 /* Enable Rx/Tx DMA */
1937 tempval = gfar_read(&regs->maccfg1);
1938 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1939 gfar_write(&regs->maccfg1, tempval);
1941 gfar_ints_enable(priv);
1943 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1946 static void free_grp_irqs(struct gfar_priv_grp *grp)
1948 free_irq(gfar_irq(grp, TX)->irq, grp);
1949 free_irq(gfar_irq(grp, RX)->irq, grp);
1950 free_irq(gfar_irq(grp, ER)->irq, grp);
1953 static int register_grp_irqs(struct gfar_priv_grp *grp)
1955 struct gfar_private *priv = grp->priv;
1956 struct net_device *dev = priv->ndev;
1959 /* If the device has multiple interrupts, register for
1960 * them. Otherwise, only register for the one
1962 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1963 /* Install our interrupt handlers for Error,
1964 * Transmit, and Receive
1966 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1967 gfar_irq(grp, ER)->name, grp);
1969 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1970 gfar_irq(grp, ER)->irq);
1974 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1975 gfar_irq(grp, TX)->name, grp);
1977 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1978 gfar_irq(grp, TX)->irq);
1981 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1982 gfar_irq(grp, RX)->name, grp);
1984 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1985 gfar_irq(grp, RX)->irq);
1989 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1990 gfar_irq(grp, TX)->name, grp);
1992 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1993 gfar_irq(grp, TX)->irq);
2001 free_irq(gfar_irq(grp, TX)->irq, grp);
2003 free_irq(gfar_irq(grp, ER)->irq, grp);
2009 static void gfar_free_irq(struct gfar_private *priv)
2014 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2015 for (i = 0; i < priv->num_grps; i++)
2016 free_grp_irqs(&priv->gfargrp[i]);
2018 for (i = 0; i < priv->num_grps; i++)
2019 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2024 static int gfar_request_irq(struct gfar_private *priv)
2028 for (i = 0; i < priv->num_grps; i++) {
2029 err = register_grp_irqs(&priv->gfargrp[i]);
2031 for (j = 0; j < i; j++)
2032 free_grp_irqs(&priv->gfargrp[j]);
2040 /* Bring the controller up and running */
2041 int startup_gfar(struct net_device *ndev)
2043 struct gfar_private *priv = netdev_priv(ndev);
2046 gfar_mac_reset(priv);
2048 err = gfar_alloc_skb_resources(ndev);
2052 gfar_init_tx_rx_base(priv);
2054 smp_mb__before_clear_bit();
2055 clear_bit(GFAR_DOWN, &priv->state);
2056 smp_mb__after_clear_bit();
2058 /* Start Rx/Tx DMA and enable the interrupts */
2061 phy_start(priv->phydev);
2065 netif_tx_wake_all_queues(ndev);
2070 /* Called when something needs to use the ethernet device
2071 * Returns 0 for success.
2073 static int gfar_enet_open(struct net_device *dev)
2075 struct gfar_private *priv = netdev_priv(dev);
2078 err = init_phy(dev);
2082 err = gfar_request_irq(priv);
2086 err = startup_gfar(dev);
2090 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2095 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2097 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2099 memset(fcb, 0, GMAC_FCB_LEN);
2104 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2107 /* If we're here, it's an IP packet with a TCP or UDP
2108 * payload. We set it to checksum, using a pseudo-header
2111 u8 flags = TXFCB_DEFAULT;
2113 /* Tell the controller what the protocol is
2114 * And provide the already calculated phcs
2116 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2118 fcb->phcs = udp_hdr(skb)->check;
2120 fcb->phcs = tcp_hdr(skb)->check;
2122 /* l3os is the distance between the start of the
2123 * frame (skb->data) and the start of the IP hdr.
2124 * l4os is the distance between the start of the
2125 * l3 hdr and the l4 hdr
2127 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2128 fcb->l4os = skb_network_header_len(skb);
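/* For a plain (untagged) IPv4 frame this typically yields l3os = 14
 * (Ethernet header) and l4os = 20 (IPv4 header without options).
 */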
2133 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2135 fcb->flags |= TXFCB_VLN;
2136 fcb->vlctl = vlan_tx_tag_get(skb);
2139 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2140 struct txbd8 *base, int ring_size)
2142 struct txbd8 *new_bd = bdp + stride;
2144 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2147 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2150 return skip_txbd(bdp, 1, base, ring_size);
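/* e.g. with a 256-entry ring, calling next_txbd() on the last
 * descriptor wraps back to the base of the ring instead of running
 * past the end.
 */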
2153 /* eTSEC12: csum generation not supported for some fcb offsets */
2154 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2155 unsigned long fcb_addr)
2157 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2158 (fcb_addr % 0x20) > 0x18);
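/* i.e. the workaround applies when the FCB would start at offset
 * 0x19..0x1f within a 32-byte (0x20) stride of its address.
 */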
2161 /* eTSEC76: csum generation for frames larger than 2500 may
2162 * cause excess delays before start of transmission
2164 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2167 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2171 /* This is called by the kernel when a frame is ready for transmission.
2172 * It is pointed to by the dev->hard_start_xmit function pointer
2174 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2176 struct gfar_private *priv = netdev_priv(dev);
2177 struct gfar_priv_tx_q *tx_queue = NULL;
2178 struct netdev_queue *txq;
2179 struct gfar __iomem *regs = NULL;
2180 struct txfcb *fcb = NULL;
2181 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2184 int do_tstamp, do_csum, do_vlan;
2186 unsigned long flags;
2187 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2189 rq = skb->queue_mapping;
2190 tx_queue = priv->tx_queue[rq];
2191 txq = netdev_get_tx_queue(dev, rq);
2192 base = tx_queue->tx_bd_base;
2193 regs = tx_queue->grp->regs;
2195 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2196 do_vlan = vlan_tx_tag_present(skb);
2197 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2200 if (do_csum || do_vlan)
2201 fcb_len = GMAC_FCB_LEN;
2203 /* check if time stamp should be generated */
2204 if (unlikely(do_tstamp))
2205 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2207 /* make space for additional header when fcb is needed */
2208 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2209 struct sk_buff *skb_new;
2211 skb_new = skb_realloc_headroom(skb, fcb_len);
2213 dev->stats.tx_errors++;
2214 dev_kfree_skb_any(skb);
2215 return NETDEV_TX_OK;
2219 skb_set_owner_w(skb_new, skb->sk);
2220 dev_consume_skb_any(skb);
2224 /* total number of fragments in the SKB */
2225 nr_frags = skb_shinfo(skb)->nr_frags;
2227 /* calculate the required number of TxBDs for this skb */
2228 if (unlikely(do_tstamp))
2229 nr_txbds = nr_frags + 2;
2231 nr_txbds = nr_frags + 1;
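/* e.g. a linear skb (nr_frags == 0) needs a single TxBD, or two when a
 * TX timestamp has been requested.
 */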
2233 /* check if there is space to queue this packet */
2234 if (nr_txbds > tx_queue->num_txbdfree) {
2235 /* no space, stop the queue */
2236 netif_tx_stop_queue(txq);
2237 dev->stats.tx_fifo_errors++;
2238 return NETDEV_TX_BUSY;
2241 /* Update transmit stats */
2242 bytes_sent = skb->len;
2243 tx_queue->stats.tx_bytes += bytes_sent;
2244 /* keep Tx bytes on wire for BQL accounting */
2245 GFAR_CB(skb)->bytes_sent = bytes_sent;
2246 tx_queue->stats.tx_packets++;
2248 txbdp = txbdp_start = tx_queue->cur_tx;
2249 lstatus = txbdp->lstatus;
2251 /* Time stamp insertion requires one additional TxBD */
2252 if (unlikely(do_tstamp))
2253 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2254 tx_queue->tx_ring_size);
2256 if (nr_frags == 0) {
2257 if (unlikely(do_tstamp))
2258 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2261 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2263 /* Place the fragment addresses and lengths into the TxBDs */
2264 for (i = 0; i < nr_frags; i++) {
2265 unsigned int frag_len;
2266 /* Point at the next BD, wrapping as needed */
2267 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2269 frag_len = skb_shinfo(skb)->frags[i].size;
2271 lstatus = txbdp->lstatus | frag_len |
2272 BD_LFLAG(TXBD_READY);
2274 /* Handle the last BD specially */
2275 if (i == nr_frags - 1)
2276 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2278 bufaddr = skb_frag_dma_map(priv->dev,
2279 &skb_shinfo(skb)->frags[i],
2280 0,
2281 frag_len,
2282 DMA_TO_DEVICE);
2284 /* set the TxBD length and buffer pointer */
2285 txbdp->bufPtr = bufaddr;
2286 txbdp->lstatus = lstatus;
2287 }
2289 lstatus = txbdp_start->lstatus;
2290 }
2292 /* Add TxPAL between FCB and frame if required */
2293 if (unlikely(do_tstamp)) {
2294 skb_push(skb, GMAC_TXPAL_LEN);
2295 memset(skb->data, 0, GMAC_TXPAL_LEN);
2298 /* Add TxFCB if required */
2299 if (fcb_len) {
2300 fcb = gfar_add_fcb(skb);
2301 lstatus |= BD_LFLAG(TXBD_TOE);
2302 }
2304 /* Set up checksumming */
2305 if (do_csum) {
2306 gfar_tx_checksum(skb, fcb, fcb_len);
2308 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2309 unlikely(gfar_csum_errata_76(priv, skb->len))) {
2310 __skb_pull(skb, GMAC_FCB_LEN);
2311 skb_checksum_help(skb);
2312 if (do_vlan || do_tstamp) {
2313 /* put back a new fcb for vlan/tstamp TOE */
2314 fcb = gfar_add_fcb(skb);
2315 } else {
2316 /* Tx TOE not used */
2317 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2318 fcb = NULL;
2319 }
2320 }
2321 }
2323 if (do_vlan)
2324 gfar_tx_vlan(skb, fcb);
2326 /* Setup tx hardware time stamping if requested */
2327 if (unlikely(do_tstamp)) {
2328 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2329 fcb->ptp = 1;
2330 }
2332 txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2333 skb_headlen(skb), DMA_TO_DEVICE);
2335 /* If time stamping is requested one additional TxBD must be set up. The
2336 * first TxBD points to the FCB and must have a data length of
2337 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2338 * the full frame length.
2340 if (unlikely(do_tstamp)) {
2341 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2342 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2343 (skb_headlen(skb) - fcb_len);
2344 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2345 } else {
2346 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2347 }
2349 netdev_tx_sent_queue(txq, bytes_sent);
2351 /* We can work in parallel with gfar_clean_tx_ring(), except
2352 * when modifying num_txbdfree. Note that we didn't grab the lock
2353 * when we were reading the num_txbdfree and checking for available
2354 * space, that's because outside of this function it can only grow,
2355 * and once we've got needed space, it cannot suddenly disappear.
2357 * The lock also protects us from gfar_error(), which can modify
2358 * regs->tstat and thus retrigger the transfers, which is why we
2359 * must also grab the lock before setting the ready bit on the
2360 * first BD to be transmitted.
2362 spin_lock_irqsave(&tx_queue->txlock, flags);
2364 /* The powerpc-specific eieio() is used, as wmb() has too strong
2365 * semantics (it requires synchronization between cacheable and
2366 * uncacheable mappings, which eieio doesn't provide and which we
2367 * don't need), thus requiring a more expensive sync instruction. At
2368 * some point, the set of architecture-independent barrier functions
2369 * should be expanded to include weaker barriers.
2373 txbdp_start->lstatus = lstatus;
2375 eieio(); /* force lstatus write before tx_skbuff */
2377 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2379 /* Update the current skb pointer to the next entry we will use
2380 * (wrapping if necessary)
2382 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2383 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2385 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2387 /* reduce TxBD free count */
2388 tx_queue->num_txbdfree -= (nr_txbds);
2390 /* If the next BD still needs to be cleaned up, then the bds
2391 * are full. We need to tell the kernel to stop sending us stuff.
2393 if (!tx_queue->num_txbdfree) {
2394 netif_tx_stop_queue(txq);
2396 dev->stats.tx_fifo_errors++;
2399 /* Tell the DMA to go go go */
2400 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2403 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2405 return NETDEV_TX_OK;
2408 /* Stops the kernel queue, and halts the controller */
2409 static int gfar_close(struct net_device *dev)
2411 struct gfar_private *priv = netdev_priv(dev);
2413 cancel_work_sync(&priv->reset_task);
2414 stop_gfar(dev);
2416 /* Disconnect from the PHY */
2417 phy_disconnect(priv->phydev);
2418 priv->phydev = NULL;
2420 gfar_free_irq(priv);
2422 return 0;
2423 }
2425 /* Changes the mac address if the controller is not running. */
2426 static int gfar_set_mac_address(struct net_device *dev)
2428 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2433 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2435 struct gfar_private *priv = netdev_priv(dev);
2436 int frame_size = new_mtu + ETH_HLEN;
2438 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2439 netif_err(priv, drv, dev, "Invalid MTU setting\n");
2440 return -EINVAL;
2441 }
2443 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2444 cpu_relax();
2446 if (dev->flags & IFF_UP)
2447 stop_gfar(dev);
2449 dev->mtu = new_mtu;
2451 if (dev->flags & IFF_UP)
2452 startup_gfar(dev);
2454 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2456 return 0;
2457 }
2459 void reset_gfar(struct net_device *ndev)
2461 struct gfar_private *priv = netdev_priv(ndev);
2463 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2464 cpu_relax();
2466 stop_gfar(ndev);
2467 startup_gfar(ndev);
2469 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2470 }
2472 /* gfar_reset_task gets scheduled when a packet has not been
2473 * transmitted after a set amount of time.
2474 * For now, assume that clearing out all the structures, and
2475 * starting over will fix the problem.
2477 static void gfar_reset_task(struct work_struct *work)
2479 struct gfar_private *priv = container_of(work, struct gfar_private,
2480 reset_task);
2481 reset_gfar(priv->ndev);
2484 static void gfar_timeout(struct net_device *dev)
2486 struct gfar_private *priv = netdev_priv(dev);
2488 dev->stats.tx_errors++;
2489 schedule_work(&priv->reset_task);
2492 static void gfar_align_skb(struct sk_buff *skb)
2494 /* We need the data buffer to be aligned properly. We will reserve
2495 * as many bytes as needed to align the data properly
2497 skb_reserve(skb, RXBUF_ALIGNMENT -
2498 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
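/* Example, assuming RXBUF_ALIGNMENT is 64: a data pointer ending in 0x08
 * gets 56 bytes reserved, so the Rx payload starts on a 64-byte boundary.
 */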
2501 /* Interrupt Handler for Transmit complete */
2502 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2504 struct net_device *dev = tx_queue->dev;
2505 struct netdev_queue *txq;
2506 struct gfar_private *priv = netdev_priv(dev);
2507 struct txbd8 *bdp, *next = NULL;
2508 struct txbd8 *lbdp = NULL;
2509 struct txbd8 *base = tx_queue->tx_bd_base;
2510 struct sk_buff *skb;
2511 int skb_dirtytx;
2512 int tx_ring_size = tx_queue->tx_ring_size;
2513 int frags = 0, nr_txbds = 0;
2514 int i;
2515 int howmany = 0;
2516 int tqi = tx_queue->qindex;
2517 unsigned int bytes_sent = 0;
2518 u32 lstatus;
2519 u32 buflen;
2521 txq = netdev_get_tx_queue(dev, tqi);
2522 bdp = tx_queue->dirty_tx;
2523 skb_dirtytx = tx_queue->skb_dirtytx;
2525 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2526 unsigned long flags;
2528 frags = skb_shinfo(skb)->nr_frags;
2530 /* When time stamping, one additional TxBD must be freed.
2531 * Also, we need to dma_unmap_single() the TxPAL.
2533 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2534 nr_txbds = frags + 2;
2535 else
2536 nr_txbds = frags + 1;
2538 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2540 lstatus = lbdp->lstatus;
2542 /* Only clean completed frames */
2543 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2544 (lstatus & BD_LENGTH_MASK))
2545 break;
2547 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2548 next = next_txbd(bdp, base, tx_ring_size);
2549 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2550 } else
2551 buflen = bdp->length;
2553 dma_unmap_single(priv->dev, bdp->bufPtr,
2554 buflen, DMA_TO_DEVICE);
2556 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2557 struct skb_shared_hwtstamps shhwtstamps;
2558 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
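/* The controller stores the Tx timestamp in the padding (TxPAL) area ahead
 * of the frame; the expression above locates that 8-byte-aligned slot at
 * data + 0x10, rounded down to an 8-byte boundary.
 */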
2560 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2561 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2562 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2563 skb_tstamp_tx(skb, &shhwtstamps);
2564 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2565 bdp = next;
2566 }
2568 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2569 bdp = next_txbd(bdp, base, tx_ring_size);
2571 for (i = 0; i < frags; i++) {
2572 dma_unmap_page(priv->dev, bdp->bufPtr,
2573 bdp->length, DMA_TO_DEVICE);
2574 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2575 bdp = next_txbd(bdp, base, tx_ring_size);
2578 bytes_sent += GFAR_CB(skb)->bytes_sent;
2580 dev_kfree_skb_any(skb);
2582 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2584 skb_dirtytx = (skb_dirtytx + 1) &
2585 TX_RING_MOD_MASK(tx_ring_size);
2587 howmany++;
2588 spin_lock_irqsave(&tx_queue->txlock, flags);
2589 tx_queue->num_txbdfree += nr_txbds;
2590 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2593 /* If we freed a buffer, we can restart transmission, if necessary */
2594 if (tx_queue->num_txbdfree &&
2595 netif_tx_queue_stopped(txq) &&
2596 !(test_bit(GFAR_DOWN, &priv->state)))
2597 netif_wake_subqueue(priv->ndev, tqi);
2599 /* Update dirty indicators */
2600 tx_queue->skb_dirtytx = skb_dirtytx;
2601 tx_queue->dirty_tx = bdp;
2603 netdev_tx_completed_queue(txq, howmany, bytes_sent);
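/* Completion counterpart of the netdev_tx_sent_queue() call in
 * gfar_start_xmit(); keeps BQL's in-flight byte count accurate.
 */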
2606 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2607 struct sk_buff *skb)
2609 struct net_device *dev = rx_queue->dev;
2610 struct gfar_private *priv = netdev_priv(dev);
2611 dma_addr_t buf;
2613 buf = dma_map_single(priv->dev, skb->data,
2614 priv->rx_buffer_size, DMA_FROM_DEVICE);
2615 gfar_init_rxbdp(rx_queue, bdp, buf);
2618 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2620 struct gfar_private *priv = netdev_priv(dev);
2621 struct sk_buff *skb;
2623 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2624 if (!skb)
2625 return NULL;
2627 gfar_align_skb(skb);
2629 return skb;
2630 }
2632 struct sk_buff *gfar_new_skb(struct net_device *dev)
2634 return gfar_alloc_skb(dev);
2637 static inline void count_errors(unsigned short status, struct net_device *dev)
2639 struct gfar_private *priv = netdev_priv(dev);
2640 struct net_device_stats *stats = &dev->stats;
2641 struct gfar_extra_stats *estats = &priv->extra_stats;
2643 /* If the packet was truncated, none of the other errors matter */
2644 if (status & RXBD_TRUNCATED) {
2645 stats->rx_length_errors++;
2647 atomic64_inc(&estats->rx_trunc);
2649 return;
2650 }
2651 /* Count the errors, if there were any */
2652 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2653 stats->rx_length_errors++;
2655 if (status & RXBD_LARGE)
2656 atomic64_inc(&estats->rx_large);
2657 else
2658 atomic64_inc(&estats->rx_short);
2659 }
2660 if (status & RXBD_NONOCTET) {
2661 stats->rx_frame_errors++;
2662 atomic64_inc(&estats->rx_nonoctet);
2664 if (status & RXBD_CRCERR) {
2665 atomic64_inc(&estats->rx_crcerr);
2666 stats->rx_crc_errors++;
2668 if (status & RXBD_OVERRUN) {
2669 atomic64_inc(&estats->rx_overrun);
2670 stats->rx_crc_errors++;
2674 irqreturn_t gfar_receive(int irq, void *grp_id)
2676 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2677 unsigned long flags;
2678 u32 imask;
2680 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2681 spin_lock_irqsave(&grp->grplock, flags);
2682 imask = gfar_read(&grp->regs->imask);
2683 imask &= IMASK_RX_DISABLED;
2684 gfar_write(&grp->regs->imask, imask);
2685 spin_unlock_irqrestore(&grp->grplock, flags);
2686 __napi_schedule(&grp->napi_rx);
2687 } else {
2688 /* Clear IEVENT, so interrupts aren't called again
2689 * because of the packets that have already arrived.
2690 */
2691 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2692 }
2694 return IRQ_HANDLED;
2695 }
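/* gfar_receive() above and gfar_transmit() below only mask the RX/TX
 * sources in IMASK and hand the work to NAPI; the matching poll routines
 * re-enable those sources once the rings have been serviced within budget.
 */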
2697 /* Interrupt Handler for Transmit complete */
2698 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2700 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2701 unsigned long flags;
2702 u32 imask;
2704 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2705 spin_lock_irqsave(&grp->grplock, flags);
2706 imask = gfar_read(&grp->regs->imask);
2707 imask &= IMASK_TX_DISABLED;
2708 gfar_write(&grp->regs->imask, imask);
2709 spin_unlock_irqrestore(&grp->grplock, flags);
2710 __napi_schedule(&grp->napi_tx);
2711 } else {
2712 /* Clear IEVENT, so interrupts aren't called again
2713 * because of the packets that have already arrived.
2714 */
2715 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2716 }
2718 return IRQ_HANDLED;
2719 }
2721 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2723 /* If valid headers were found, and valid sums
2724 * were verified, then we tell the kernel that no
2725 * checksumming is necessary. Otherwise, the stack is left to verify it.
2726 */
2727 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2728 skb->ip_summed = CHECKSUM_UNNECESSARY;
2729 else
2730 skb_checksum_none_assert(skb);
2731 }
2734 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2735 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2736 int amount_pull, struct napi_struct *napi)
2738 struct gfar_private *priv = netdev_priv(dev);
2739 struct rxfcb *fcb = NULL;
2741 /* fcb is at the beginning if exists */
2742 fcb = (struct rxfcb *)skb->data;
2744 /* Remove the FCB from the skb
2745 * Remove the padded bytes, if there are any
2746 */
2747 if (amount_pull) {
2748 skb_record_rx_queue(skb, fcb->rq);
2749 skb_pull(skb, amount_pull);
2750 }
2752 /* Get receive timestamp from the skb */
2753 if (priv->hwts_rx_en) {
2754 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2755 u64 *ns = (u64 *) skb->data;
2757 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2758 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2759 }
2761 if (priv->padding)
2762 skb_pull(skb, priv->padding);
2764 if (dev->features & NETIF_F_RXCSUM)
2765 gfar_rx_checksum(skb, fcb);
2767 /* Tell the skb what kind of packet this is */
2768 skb->protocol = eth_type_trans(skb, dev);
2770 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2771 * Even if vlan rx accel is disabled, on some chips
2772 * RXFCB_VLN is pseudo randomly set.
2774 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2775 fcb->flags & RXFCB_VLN)
2776 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2778 /* Send the packet up the stack */
2779 napi_gro_receive(napi, skb);
2783 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2784 * until the budget/quota has been reached. Returns the number
2785 * of frames handled.
2786 */
2787 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2789 struct net_device *dev = rx_queue->dev;
2790 struct rxbd8 *bdp, *base;
2791 struct sk_buff *skb;
2792 int pkt_len;
2793 int amount_pull;
2794 int howmany = 0;
2795 struct gfar_private *priv = netdev_priv(dev);
2797 /* Get the first full descriptor */
2798 bdp = rx_queue->cur_rx;
2799 base = rx_queue->rx_bd_base;
2801 amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
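/* uses_rxfcb means the controller prepends an Rx FCB to every frame; those
 * GMAC_FCB_LEN bytes are stripped again in gfar_process_frame().
 */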
2803 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2804 struct sk_buff *newskb;
2806 rmb();
2808 /* Add another skb for the future */
2809 newskb = gfar_new_skb(dev);
2811 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2813 dma_unmap_single(priv->dev, bdp->bufPtr,
2814 priv->rx_buffer_size, DMA_FROM_DEVICE);
2816 if (unlikely(!(bdp->status & RXBD_ERR) &&
2817 bdp->length > priv->rx_buffer_size))
2818 bdp->status = RXBD_LARGE;
2820 /* We drop the frame if we failed to allocate a new buffer */
2821 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2822 bdp->status & RXBD_ERR)) {
2823 count_errors(bdp->status, dev);
2825 if (unlikely(!newskb))
2826 newskb = skb;
2827 else if (skb)
2828 dev_kfree_skb(skb);
2829 } else {
2830 /* Increment the number of packets */
2831 rx_queue->stats.rx_packets++;
2832 howmany++;
2834 if (likely(skb)) {
2835 pkt_len = bdp->length - ETH_FCS_LEN;
2836 /* Remove the FCS from the packet length */
2837 skb_put(skb, pkt_len);
2838 rx_queue->stats.rx_bytes += pkt_len;
2839 skb_record_rx_queue(skb, rx_queue->qindex);
2840 gfar_process_frame(dev, skb, amount_pull,
2841 &rx_queue->grp->napi_rx);
2843 } else {
2844 netif_warn(priv, rx_err, dev, "Missing skb!\n");
2845 rx_queue->stats.rx_dropped++;
2846 atomic64_inc(&priv->extra_stats.rx_skbmissing);
2847 }
2849 }
2851 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2853 /* Setup the new bdp */
2854 gfar_new_rxbdp(rx_queue, bdp, newskb);
2856 /* Update to the next pointer */
2857 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2859 /* update to point at the next skb */
2860 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2861 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2862 }
2864 /* Update the current rxbd pointer to be the next one */
2865 rx_queue->cur_rx = bdp;
2867 return howmany;
2868 }
2870 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2872 struct gfar_priv_grp *gfargrp =
2873 container_of(napi, struct gfar_priv_grp, napi_rx);
2874 struct gfar __iomem *regs = gfargrp->regs;
2875 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2876 int work_done = 0;
2878 /* Clear IEVENT, so interrupts aren't called again
2879 * because of the packets that have already arrived
2880 */
2881 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2883 work_done = gfar_clean_rx_ring(rx_queue, budget);
2885 if (work_done < budget) {
2886 u32 imask;
2887 napi_complete(napi);
2888 /* Clear the halt bit in RSTAT */
2889 gfar_write(&regs->rstat, gfargrp->rstat);
2891 spin_lock_irq(&gfargrp->grplock);
2892 imask = gfar_read(&regs->imask);
2893 imask |= IMASK_RX_DEFAULT;
2894 gfar_write(&regs->imask, imask);
2895 spin_unlock_irq(&gfargrp->grplock);
2896 }
2898 return work_done;
2899 }
2901 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2903 struct gfar_priv_grp *gfargrp =
2904 container_of(napi, struct gfar_priv_grp, napi_tx);
2905 struct gfar __iomem *regs = gfargrp->regs;
2906 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2907 u32 imask;
2909 /* Clear IEVENT, so interrupts aren't called again
2910 * because of the packets that have already arrived
2911 */
2912 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2914 /* run Tx cleanup to completion */
2915 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2916 gfar_clean_tx_ring(tx_queue);
2918 napi_complete(napi);
2920 spin_lock_irq(&gfargrp->grplock);
2921 imask = gfar_read(&regs->imask);
2922 imask |= IMASK_TX_DEFAULT;
2923 gfar_write(&regs->imask, imask);
2924 spin_unlock_irq(&gfargrp->grplock);
2926 return 0;
2927 }
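/* The *_sq poll routines above serve the single-queue polling mode;
 * gfar_poll_rx() and gfar_poll_tx() below walk every Rx/Tx queue mapped
 * to the interrupt group.
 */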
2929 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2931 struct gfar_priv_grp *gfargrp =
2932 container_of(napi, struct gfar_priv_grp, napi_rx);
2933 struct gfar_private *priv = gfargrp->priv;
2934 struct gfar __iomem *regs = gfargrp->regs;
2935 struct gfar_priv_rx_q *rx_queue = NULL;
2936 int work_done = 0, work_done_per_q = 0;
2937 int i, budget_per_q = 0;
2938 unsigned long rstat_rxf;
2939 int num_act_queues;
2941 /* Clear IEVENT, so interrupts aren't called again
2942 * because of the packets that have already arrived
2944 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2946 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2948 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2949 if (num_act_queues)
2950 budget_per_q = budget/num_act_queues;
2952 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2953 /* skip queue if not active */
2954 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2955 continue;
2957 rx_queue = priv->rx_queue[i];
2958 work_done_per_q =
2959 gfar_clean_rx_ring(rx_queue, budget_per_q);
2960 work_done += work_done_per_q;
2962 /* finished processing this queue */
2963 if (work_done_per_q < budget_per_q) {
2964 /* clear active queue hw indication */
2965 gfar_write(&regs->rstat,
2966 RSTAT_CLEAR_RXF0 >> i);
2967 num_act_queues--;
2969 if (!num_act_queues)
2970 break;
2971 }
2972 }
2974 if (!num_act_queues) {
2975 u32 imask;
2976 napi_complete(napi);
2978 /* Clear the halt bit in RSTAT */
2979 gfar_write(&regs->rstat, gfargrp->rstat);
2981 spin_lock_irq(&gfargrp->grplock);
2982 imask = gfar_read(&regs->imask);
2983 imask |= IMASK_RX_DEFAULT;
2984 gfar_write(&regs->imask, imask);
2985 spin_unlock_irq(&gfargrp->grplock);
2986 }
2988 return work_done;
2989 }
2991 static int gfar_poll_tx(struct napi_struct *napi, int budget)
2993 struct gfar_priv_grp *gfargrp =
2994 container_of(napi, struct gfar_priv_grp, napi_tx);
2995 struct gfar_private *priv = gfargrp->priv;
2996 struct gfar __iomem *regs = gfargrp->regs;
2997 struct gfar_priv_tx_q *tx_queue = NULL;
2998 int has_tx_work = 0;
2999 int i;
3001 /* Clear IEVENT, so interrupts aren't called again
3002 * because of the packets that have already arrived
3004 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3006 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3007 tx_queue = priv->tx_queue[i];
3008 /* run Tx cleanup to completion */
3009 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3010 gfar_clean_tx_ring(tx_queue);
3011 has_tx_work = 1;
3012 }
3013 }
3015 if (!has_tx_work) {
3016 u32 imask;
3017 napi_complete(napi);
3019 spin_lock_irq(&gfargrp->grplock);
3020 imask = gfar_read(&regs->imask);
3021 imask |= IMASK_TX_DEFAULT;
3022 gfar_write(&regs->imask, imask);
3023 spin_unlock_irq(&gfargrp->grplock);
3024 }
3026 return 0;
3027 }
3030 #ifdef CONFIG_NET_POLL_CONTROLLER
3031 /* Polling 'interrupt' - used by things like netconsole to send skbs
3032 * without having to re-enable interrupts. It's not called while
3033 * the interrupt routine is executing.
3035 static void gfar_netpoll(struct net_device *dev)
3037 struct gfar_private *priv = netdev_priv(dev);
3038 int i;
3040 /* If the device has multiple interrupts, run tx/rx */
3041 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3042 for (i = 0; i < priv->num_grps; i++) {
3043 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3045 disable_irq(gfar_irq(grp, TX)->irq);
3046 disable_irq(gfar_irq(grp, RX)->irq);
3047 disable_irq(gfar_irq(grp, ER)->irq);
3048 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3049 enable_irq(gfar_irq(grp, ER)->irq);
3050 enable_irq(gfar_irq(grp, RX)->irq);
3051 enable_irq(gfar_irq(grp, TX)->irq);
3052 }
3053 } else {
3054 for (i = 0; i < priv->num_grps; i++) {
3055 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3057 disable_irq(gfar_irq(grp, TX)->irq);
3058 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3059 enable_irq(gfar_irq(grp, TX)->irq);
3060 }
3061 }
3062 }
3064 #endif
3065 /* The interrupt handler for devices with one interrupt */
3066 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3068 struct gfar_priv_grp *gfargrp = grp_id;
3070 /* Save ievent for future reference */
3071 u32 events = gfar_read(&gfargrp->regs->ievent);
3073 /* Check for reception */
3074 if (events & IEVENT_RX_MASK)
3075 gfar_receive(irq, grp_id);
3077 /* Check for transmit completion */
3078 if (events & IEVENT_TX_MASK)
3079 gfar_transmit(irq, grp_id);
3081 /* Check for errors */
3082 if (events & IEVENT_ERR_MASK)
3083 gfar_error(irq, grp_id);
3085 return IRQ_HANDLED;
3086 }
3088 /* Called every time the controller might need to be made
3089 * aware of new link state. The PHY code conveys this
3090 * information through variables in the phydev structure, and this
3091 * function converts those variables into the appropriate
3092 * register values, and can bring down the device if needed.
3094 static void adjust_link(struct net_device *dev)
3096 struct gfar_private *priv = netdev_priv(dev);
3097 struct phy_device *phydev = priv->phydev;
3099 if (unlikely(phydev->link != priv->oldlink ||
3100 phydev->duplex != priv->oldduplex ||
3101 phydev->speed != priv->oldspeed))
3102 gfar_update_link_state(priv);
3105 /* Update the hash table based on the current list of multicast
3106 * addresses we subscribe to. Also, change the promiscuity of
3107 * the device based on the flags (this function is called
3108 * whenever dev->flags is changed).
3109 */
3110 static void gfar_set_multi(struct net_device *dev)
3112 struct netdev_hw_addr *ha;
3113 struct gfar_private *priv = netdev_priv(dev);
3114 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3115 u32 tempval;
3117 if (dev->flags & IFF_PROMISC) {
3118 /* Set RCTRL to PROM */
3119 tempval = gfar_read(&regs->rctrl);
3120 tempval |= RCTRL_PROM;
3121 gfar_write(&regs->rctrl, tempval);
3122 } else {
3123 /* Set RCTRL to not PROM */
3124 tempval = gfar_read(&regs->rctrl);
3125 tempval &= ~(RCTRL_PROM);
3126 gfar_write(&regs->rctrl, tempval);
3127 }
3129 if (dev->flags & IFF_ALLMULTI) {
3130 /* Set the hash to rx all multicast frames */
3131 gfar_write(&regs->igaddr0, 0xffffffff);
3132 gfar_write(&regs->igaddr1, 0xffffffff);
3133 gfar_write(&regs->igaddr2, 0xffffffff);
3134 gfar_write(&regs->igaddr3, 0xffffffff);
3135 gfar_write(&regs->igaddr4, 0xffffffff);
3136 gfar_write(&regs->igaddr5, 0xffffffff);
3137 gfar_write(&regs->igaddr6, 0xffffffff);
3138 gfar_write(&regs->igaddr7, 0xffffffff);
3139 gfar_write(&regs->gaddr0, 0xffffffff);
3140 gfar_write(&regs->gaddr1, 0xffffffff);
3141 gfar_write(&regs->gaddr2, 0xffffffff);
3142 gfar_write(&regs->gaddr3, 0xffffffff);
3143 gfar_write(&regs->gaddr4, 0xffffffff);
3144 gfar_write(&regs->gaddr5, 0xffffffff);
3145 gfar_write(&regs->gaddr6, 0xffffffff);
3146 gfar_write(&regs->gaddr7, 0xffffffff);
3147 } else {
3148 int em_num;
3149 int idx;
3151 /* zero out the hash */
3152 gfar_write(&regs->igaddr0, 0x0);
3153 gfar_write(&regs->igaddr1, 0x0);
3154 gfar_write(&regs->igaddr2, 0x0);
3155 gfar_write(&regs->igaddr3, 0x0);
3156 gfar_write(&regs->igaddr4, 0x0);
3157 gfar_write(&regs->igaddr5, 0x0);
3158 gfar_write(&regs->igaddr6, 0x0);
3159 gfar_write(&regs->igaddr7, 0x0);
3160 gfar_write(&regs->gaddr0, 0x0);
3161 gfar_write(&regs->gaddr1, 0x0);
3162 gfar_write(&regs->gaddr2, 0x0);
3163 gfar_write(&regs->gaddr3, 0x0);
3164 gfar_write(&regs->gaddr4, 0x0);
3165 gfar_write(&regs->gaddr5, 0x0);
3166 gfar_write(&regs->gaddr6, 0x0);
3167 gfar_write(&regs->gaddr7, 0x0);
3169 /* If we have extended hash tables, we need to
3170 * clear the exact match registers to prepare for
3171 * setting them
3172 */
3173 if (priv->extended_hash) {
3174 em_num = GFAR_EM_NUM + 1;
3175 gfar_clear_exact_match(dev);
3176 idx = 1;
3177 } else {
3178 em_num = 0;
3179 idx = 0;
3180 }
3182 if (netdev_mc_empty(dev))
3183 return;
3185 /* Parse the list, and set the appropriate bits */
3186 netdev_for_each_mc_addr(ha, dev) {
3187 if (idx < em_num) {
3188 gfar_set_mac_for_addr(dev, idx, ha->addr);
3189 idx++;
3190 } else
3191 gfar_set_hash_for_addr(dev, ha->addr);
3192 }
3193 }
3194 }
3197 /* Clears each of the exact match registers to zero, so they
3198 * don't interfere with normal reception
3200 static void gfar_clear_exact_match(struct net_device *dev)
3201 {
3202 int idx;
3203 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3205 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3206 gfar_set_mac_for_addr(dev, idx, zero_arr);
3209 /* Set the appropriate hash bit for the given addr */
3210 /* The algorithm works like so:
3211 * 1) Take the Destination Address (ie the multicast address), and
3212 * do a CRC on it (little endian), and reverse the bits of the result.
3214 * 2) Use the 8 most significant bits as a hash into a 256-entry
3215 * table. The table is controlled through 8 32-bit registers:
3216 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3217 * entry 255. This means that the 3 most significant bits of the
3218 * hash index select which gaddr register to use, and the other 5 bits
3219 * indicate which bit (assuming an IBM numbering scheme, which
3220 * for PowerPC (tm) is usually the case) in the register holds
3223 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3224 {
3225 u32 tempval;
3226 struct gfar_private *priv = netdev_priv(dev);
3227 u32 result = ether_crc(ETH_ALEN, addr);
3228 int width = priv->hash_width;
3229 u8 whichbit = (result >> (32 - width)) & 0x1f;
3230 u8 whichreg = result >> (32 - width + 5);
3231 u32 value = (1 << (31-whichbit));
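/* For the non-extended 8-bit hash (hash_width == 8): whichreg is the top
 * 3 bits of the hash, whichbit the low 5 bits, and the bit is set MSB-first
 * (IBM bit numbering), matching the description above.
 */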
3233 tempval = gfar_read(priv->hash_regs[whichreg]);
3234 tempval |= value;
3235 gfar_write(priv->hash_regs[whichreg], tempval);
3239 /* There are multiple MAC Address register pairs on some controllers
3240 * This function sets the numth pair to a given address
3242 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3243 const u8 *addr)
3244 {
3245 struct gfar_private *priv = netdev_priv(dev);
3246 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3247 int idx;
3248 char tmpbuf[ETH_ALEN];
3249 u32 tempval;
3250 u32 __iomem *macptr = &regs->macstnaddr1;
3252 macptr += num*2;
3254 /* Now copy it into the mac registers backwards, cuz
3255 * little endian is silly
3257 for (idx = 0; idx < ETH_ALEN; idx++)
3258 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3260 gfar_write(macptr, *((u32 *) (tmpbuf)));
3262 tempval = *((u32 *) (tmpbuf + 4));
3264 gfar_write(macptr+1, tempval);
3267 /* GFAR error interrupt handler */
3268 static irqreturn_t gfar_error(int irq, void *grp_id)
3270 struct gfar_priv_grp *gfargrp = grp_id;
3271 struct gfar __iomem *regs = gfargrp->regs;
3272 struct gfar_private *priv = gfargrp->priv;
3273 struct net_device *dev = priv->ndev;
3275 /* Save ievent for future reference */
3276 u32 events = gfar_read(&regs->ievent);
3278 /* Clear IEVENT */
3279 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3281 /* Magic Packet is not an error. */
3282 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3283 (events & IEVENT_MAG))
3284 events &= ~IEVENT_MAG;
3287 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3288 netdev_dbg(dev,
3289 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3290 events, gfar_read(&regs->imask));
3292 /* Update the error counters */
3293 if (events & IEVENT_TXE) {
3294 dev->stats.tx_errors++;
3296 if (events & IEVENT_LC)
3297 dev->stats.tx_window_errors++;
3298 if (events & IEVENT_CRL)
3299 dev->stats.tx_aborted_errors++;
3300 if (events & IEVENT_XFUN) {
3301 unsigned long flags;
3303 netif_dbg(priv, tx_err, dev,
3304 "TX FIFO underrun, packet dropped\n");
3305 dev->stats.tx_dropped++;
3306 atomic64_inc(&priv->extra_stats.tx_underrun);
3308 local_irq_save(flags);
3309 lock_tx_qs(priv);
3311 /* Reactivate the Tx Queues */
3312 gfar_write(&regs->tstat, gfargrp->tstat);
3314 unlock_tx_qs(priv);
3315 local_irq_restore(flags);
3316 }
3317 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3319 if (events & IEVENT_BSY) {
3320 dev->stats.rx_errors++;
3321 atomic64_inc(&priv->extra_stats.rx_bsy);
3323 gfar_receive(irq, grp_id);
3325 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3326 gfar_read(&regs->rstat));
3328 if (events & IEVENT_BABR) {
3329 dev->stats.rx_errors++;
3330 atomic64_inc(&priv->extra_stats.rx_babr);
3332 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3334 if (events & IEVENT_EBERR) {
3335 atomic64_inc(&priv->extra_stats.eberr);
3336 netif_dbg(priv, rx_err, dev, "bus error\n");
3338 if (events & IEVENT_RXC)
3339 netif_dbg(priv, rx_status, dev, "control frame\n");
3341 if (events & IEVENT_BABT) {
3342 atomic64_inc(&priv->extra_stats.tx_babt);
3343 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3344 }
3346 return IRQ_HANDLED;
3347 }
3348 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3350 struct phy_device *phydev = priv->phydev;
3351 u32 val = 0;
3353 if (!phydev->duplex)
3354 return val;
3356 if (!priv->pause_aneg_en) {
3357 if (priv->tx_pause_en)
3358 val |= MACCFG1_TX_FLOW;
3359 if (priv->rx_pause_en)
3360 val |= MACCFG1_RX_FLOW;
3361 } else {
3362 u16 lcl_adv, rmt_adv;
3363 u8 flowctrl;
3364 /* get link partner capabilities */
3365 rmt_adv = 0;
3366 if (phydev->pause)
3367 rmt_adv = LPA_PAUSE_CAP;
3368 if (phydev->asym_pause)
3369 rmt_adv |= LPA_PAUSE_ASYM;
3371 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3373 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
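/* mii_resolve_flowctrl_fdx() turns the local and link-partner pause
 * advertisements into FLOW_CTRL_TX/RX bits, mapped to MACCFG1 below.
 */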
3374 if (flowctrl & FLOW_CTRL_TX)
3375 val |= MACCFG1_TX_FLOW;
3376 if (flowctrl & FLOW_CTRL_RX)
3377 val |= MACCFG1_RX_FLOW;
3378 }
3380 return val;
3381 }
3383 static noinline void gfar_update_link_state(struct gfar_private *priv)
3385 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3386 struct phy_device *phydev = priv->phydev;
3388 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3389 return;
3391 if (phydev->link) {
3392 u32 tempval1 = gfar_read(&regs->maccfg1);
3393 u32 tempval = gfar_read(&regs->maccfg2);
3394 u32 ecntrl = gfar_read(&regs->ecntrl);
3396 if (phydev->duplex != priv->oldduplex) {
3397 if (!(phydev->duplex))
3398 tempval &= ~(MACCFG2_FULL_DUPLEX);
3400 tempval |= MACCFG2_FULL_DUPLEX;
3402 priv->oldduplex = phydev->duplex;
3405 if (phydev->speed != priv->oldspeed) {
3406 switch (phydev->speed) {
3407 case 1000:
3408 tempval =
3409 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3411 ecntrl &= ~(ECNTRL_R100);
3412 break;
3413 case 100:
3414 case 10:
3415 tempval =
3416 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3418 /* Reduced mode distinguishes
3419 * between 10 and 100
3420 */
3421 if (phydev->speed == SPEED_100)
3422 ecntrl |= ECNTRL_R100;
3423 else
3424 ecntrl &= ~(ECNTRL_R100);
3425 break;
3426 default:
3427 netif_warn(priv, link, priv->ndev,
3428 "Ack! Speed (%d) is not 10/100/1000!\n",
3429 phydev->speed);
3430 break;
3431 }
3433 priv->oldspeed = phydev->speed;
3434 }
3436 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3437 tempval1 |= gfar_get_flowctrl_cfg(priv);
3439 gfar_write(&regs->maccfg1, tempval1);
3440 gfar_write(&regs->maccfg2, tempval);
3441 gfar_write(&regs->ecntrl, ecntrl);
3443 if (!priv->oldlink)
3444 priv->oldlink = 1;
3446 } else if (priv->oldlink) {
3447 priv->oldlink = 0;
3448 priv->oldspeed = 0;
3449 priv->oldduplex = -1;
3450 }
3452 if (netif_msg_link(priv))
3453 phy_print_status(phydev);
3456 static struct of_device_id gfar_match[] =
3457 {
3458 {
3459 .type = "network",
3460 .compatible = "gianfar",
3461 },
3462 {
3463 .compatible = "fsl,etsec2",
3464 },
3465 {},
3466 };
3467 MODULE_DEVICE_TABLE(of, gfar_match);
3469 /* Structure for a device driver */
3470 static struct platform_driver gfar_driver = {
3471 .driver = {
3472 .name = "fsl-gianfar",
3473 .owner = THIS_MODULE,
3474 .pm = GFAR_PM_OPS,
3475 .of_match_table = gfar_match,
3476 },
3477 .probe = gfar_probe,
3478 .remove = gfar_remove,
3479 };
3481 module_platform_driver(gfar_driver);