// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

/* TX/RX buffer descriptor ring sizes, used throughout below */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

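/* The NIXGE control registers below live in the same memory window as
 * the DMA registers, NIXGE_REG_CTRL_OFFSET (0x4000) past the DMA
 * register base; see nixge_probe(), which derives ctrl_regs from
 * dma_regs.
 */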
#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

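/* The driver runs two rings of hardware buffer descriptors, TX_BD_NUM
 * and RX_BD_NUM entries long.  Each descriptor's 'next' pointer chains
 * to the following descriptor and the last one wraps back to the
 * first, so the DMA engine sees a circular list.
 */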
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

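/* Undo nixge_hw_dma_bd_init(): unmap and free every RX buffer, then
 * release the descriptor rings themselves.
 */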
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (priv->rx_bd_v[i].sw_id_offset));
	}

	if (priv->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v, priv->rx_bd_p);

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v, priv->tx_bd_p);
}

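/* Allocate both descriptor rings, pre-fill the RX ring with mapped
 * skbs, program the interrupt coalescing parameters and start both DMA
 * channels by setting their run/stop bits and tail pointers.
 */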
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kzalloc(ndev->dev.parent,
				    sizeof(*priv->tx_skb) *
				    TX_BD_NUM,
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

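/* Release the DMA mapping (page or single) recorded for one TX slot
 * and free its skb, if any.
 */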
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

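/* Reclaim TX descriptors that the DMA engine has marked complete,
 * starting at tx_bd_ci, and update the TX statistics accordingly.
 */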
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;

	return 0;
}

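/* Queue one skb for transmission: the linear head goes into the
 * descriptor at tx_bd_tail with TXSOF set, each page fragment into a
 * following descriptor, and the last descriptor gets TXEOF plus the
 * skb pointer so the completion path can free it.  Writing the tail
 * descriptor address kicks off the transfer.
 */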
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

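/* Receive up to 'budget' completed RX descriptors: hand each filled
 * skb to GRO, map a fresh skb back into the descriptor, and finally
 * advance the hardware tail pointer past the last consumed slot.
 */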
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

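/* NAPI poll handler: RX interrupts were masked in nixge_rx_irq(), so
 * either reschedule (more work pending) or re-enable them once the
 * budget is not exhausted.
 */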
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

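/* Tasklet scheduled from the IRQ handlers on a DMA error: reset both
 * channels, scrub all descriptors back to a clean state and restart
 * the engine, mirroring nixge_hw_dma_bd_init() but without
 * reallocating the rings.
 */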
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	     NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		ctrl |= NIXGE_ID_LED_CTL_EN;
		/* Enable identification LED override */
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo = nixge_ethtools_get_drvinfo,
	.get_coalesce = nixge_ethtools_get_coalesce,
	.set_coalesce = nixge_ethtools_set_coalesce,
	.set_phys_id = nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
};

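/* MDIO access works through a small register interface: an OP word
 * selects clause 22/45 and the opcode, ADDR/DATA carry the payload,
 * and writing 1 to MDIO_CTRL starts the transaction, with CTRL polled
 * back to 0 once it has finished.  Clause 45 needs a separate address
 * phase before the actual read or write.
 */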
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const char *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr))
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		return priv->tx_irq;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		return priv->rx_irq;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");