// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2019 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
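
/* Illustrative layout math (assuming TOTAL_DESC = 256 and the
 * words_per_bd = 2 case without 40-bit addressing): one descriptor is
 * 2 * sizeof(u32) = 8 bytes, so the DMA control registers start
 * 256 * 8 = 0x800 bytes past tdma_offset/rdma_offset, immediately
 * after the descriptor array that these offsets skip over.
 */
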
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static void bcmgenet_set_rx_csum(struct net_device *dev,
				 netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
}

static void bcmgenet_set_tx_csum(struct net_device *dev,
				 netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_tx_csum(dev, features);
	bcmgenet_set_rx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}
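
/* Worked example of the conversion above: one timeout tick of the
 * 125MHz / 1024 reference clock is 8.192us, so a request of
 * usecs = 50 programs DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks,
 * i.e. an effective timeout of roughly 57.3us.
 */
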
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
	    ec->use_adaptive_tx_coalesce)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}
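
/* These paths are driven by the standard ethtool coalescing interface,
 * e.g. (illustrative):
 *   ethtool -C eth0 rx-usecs 50 rx-frames 16 adaptive-rx on
 *   ethtool -C eth0 tx-frames 8
 */
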
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)
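
/* For reference, STAT_GENET_Q(0) expands to six software counters such
 * as "txq0_packets" (backed by priv->tx_rings[0].packets) and
 * "rxq0_dropped" (backed by priv->rx_rings[0].dropped).
 */
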
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
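
/* The fall-throughs above encode the counter bank layout: an RX (RSV)
 * counter is read at UMAC_MIB_START + j, a TX (TSV) counter skips one
 * 0xC-byte gap, and a RUNT counter skips two, matching the hardware
 * layout described next to BCMGENET_STAT_OFFSET.
 */
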
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for the RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}
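
/* Example of the circular walk above (assuming a 32-descriptor ring
 * spanning cb_ptr = 32 .. end_ptr = 63): get_txcb() at write_ptr = 63
 * returns the control block for descriptor 63 and wraps write_ptr back
 * to 32; put_txcb() undoes exactly one such step after a mapping error.
 */
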
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed.  The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}

/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}
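
/* The index arithmetic above is wraparound-safe because TDMA_CONS_INDEX
 * is a free-running 16-bit counter: e.g. a hardware c_index of 0x0002
 * against a stale ring->c_index of 0xFFFE yields
 * (0x0002 - 0xFFFE) & DMA_C_INDEX_MASK = 4 buffers to reclaim.
 */
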
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	__be16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		if (!new_skb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = new_skb;
		priv->mib.tx_realloc_tsb++;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			/* don't use UDP flag */
			ip_proto = 0;
			break;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset) |
				STATUS_TX_CSUM_LV;

		/* Set the special UDP flag for UDP */
		if (ip_proto == IPPROTO_UDP)
			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	for (i = 0; i <= nr_frags; i++) {
		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

		BUG_ON(!tx_cb_ptr);

		if (!i) {
			/* Transmit single SKB or head of fragment list */
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			size = skb_headlen(skb);
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			/* xmit fragment */
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		ret = dma_mapping_error(kdev, mapping);
		if (ret) {
			priv->mib.tx_dma_failed++;
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
			ret = NETDEV_TX_OK;
			goto out_unmap_frags;
		}
		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
		dma_unmap_len_set(tx_cb_ptr, dma_len, size);

		tx_cb_ptr->skb = skb;

		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

		if (!i) {
			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				len_stat |= DMA_TX_DO_CSUM;
		}
		if (i == nr_frags)
			len_stat |= DMA_EOP;

		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	}

	GENET_CB(skb)->last_cb = tx_cb_ptr;
	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock(&ring->lock);

	return ret;

out_unmap_frags:
	/* Back up for failed control block mapping */
	bcmgenet_put_txcb(priv, ring);

	/* Unmap successfully mapped control blocks */
	while (i-- > 0) {
		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
	}

	dev_kfree_skb(skb);
	goto out;
}

static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
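
/* Design note: the refill above swaps buffers instead of leaving a hole
 * in the ring; the replacement skb is allocated and DMA-mapped before
 * the full one is taken off, so an allocation failure simply keeps the
 * old buffer in place and the caller drops that packet.
 */
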
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int bytes_processed = 0;
	unsigned int p_index, mask;
	unsigned int discards;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX) {
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	} else {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
		bcmgenet_intrl2_1_writel(priv,
					 mask,
					 INTRL2_CPU_CLEAR);
	}

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;
	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;
			__be16 rx_csum;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
			if (priv->desc_rxchk_en) {
				skb->csum = (__force __wsum)ntohs(rx_csum);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			ring->errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		bytes_processed += len;

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		ring->packets++;
		ring->bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	ring->dim.bytes = bytes_processed;
	ring->dim.packets = rxpktprocessed;

	return rxpktprocessed;
}
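
/* Note on the receive path above: the hardware length covers everything
 * placed in the buffer, so the driver peels layers off in order: the
 * 64-byte status block (only with desc_64b_en), the 2 alignment bytes
 * inserted before the Ethernet header, and the 4-byte FCS when the
 * UniMAC is configured to forward it.
 */
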
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	if (ring->dim.use_dim) {
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
	}

	return work_done;
}

static void bcmgenet_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcmgenet_net_dim *ndim =
			container_of(dim, struct bcmgenet_net_dim, dim);
	struct bcmgenet_rx_ring *ring =
			container_of(ndim, struct bcmgenet_rx_ring, dim);
	struct dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_consume_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_consume_skb_any(skb);
	}
}

static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void reset_umac(struct bcmgenet_priv *priv)
{
	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
	bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
}

static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
}

static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
			int0_enable |= UMAC_IRQ_PHY_DET_R;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}

static void init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	u32 reg;
	u32 int0_enable = 0;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	reset_umac(priv);

	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");
}

static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
			      void (*cb)(struct work_struct *work))
{
	struct bcmgenet_net_dim *dim = &ring->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
	struct bcmgenet_net_dim *dim = &ring->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	/* Initialize Tx NAPI */
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
			  NAPI_POLL_WEIGHT);
}

/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_init_dim(ring, bcmgenet_dim_work);
	bcmgenet_init_rx_coalesce(ring);

	/* Initialize Rx NAPI */
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
		       NAPI_POLL_WEIGHT);

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
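
/* Hedged reading of the RDMA_XON_XOFF_THRESH write above: the XOFF
 * threshold (DMA_FC_THRESH_LO) and XON threshold (DMA_FC_THRESH_HI)
 * appear to be expressed in free buffers, so pause frames would be
 * asserted when the ring runs low and deasserted once refill catches
 * up.
 */
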
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
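/* Worked example of the priority packing above, relying on the
 * DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() helpers from bcmgenet.h,
 * which pack the 5-bit priority fields six to a 32-bit register. With
 * 4 Tx queues:
 *
 *	queue 0  -> dma_priority[0], shift  0, priority 0 (highest)
 *	queue 3  -> dma_priority[0], shift 15, priority 3
 *	queue 16 -> dma_priority[2], shift 20, priority 4 (default queue)
 */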
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
		cancel_work_sync(&ring->dim.dim.work);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
	cancel_work_sync(&ring->dim.dim.work);
}

static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
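/* The Rx disable path differs from Tx in one respect: each Rx ring runs a
 * Dynamic Interrupt Moderation (DIM) worker that can rewrite the coalescing
 * registers, so cancel_work_sync() must flush it once NAPI is disabled and
 * before the ring is torn down.
 */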
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
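/* Rx queue setup is the only queue-init path that can fail, because every
 * ring must be filled with freshly allocated receive buffers. Tx rings
 * carry no preallocated memory, which is why bcmgenet_init_tx_queues()
 * returns void while this function propagates errors up to
 * bcmgenet_init_dma().
 */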
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop adding more frames to the TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}
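/* Teardown ordering matters here: TDMA is stopped first so no new frames
 * can be queued, the 10ms sleep lets in-flight packets drain, and only
 * then is RDMA stopped and the per-ring enable bits cleared. A timeout on
 * either DMA_DISABLED poll is reported but does not abort the teardown.
 */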
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	struct netdev_queue *txq;
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	for (i = 0; i < priv->num_tx_bds; i++)
		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
						  priv->tx_cbs + i));

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
		netdev_tx_reset_queue(txq);
	}

	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	netdev_tx_reset_queue(txq);

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	unsigned int status;
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	spin_lock_irq(&priv->lock);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irq(&priv->lock);

	if (status & UMAC_IRQ_PHY_DET_R &&
	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
		phy_init_hw(priv->dev->phydev);
		genphy_config_aneg(priv->dev->phydev);
	}

	/* Link UP/DOWN event */
	if (status & UMAC_IRQ_LINK_EVENT)
		phy_mac_interrupt(priv->dev->phydev);
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index, status;

	/* Read irq status */
	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, status);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];
		rx_ring->dim.event_ctr++;

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(status & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}
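/* Both this handler and bcmgenet_isr0() below use the canonical NAPI
 * hand-off: napi_schedule_prep() wins the right to schedule exactly once,
 * the ring interrupt is then masked via int_disable() so it cannot
 * re-fire, and the poll function (bcmgenet_rx_poll/bcmgenet_tx_poll)
 * re-enables it once the ring is drained. Hard-IRQ work thus reduces to a
 * mask-and-schedule.
 */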
/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int status;
	unsigned long flags;

	/* Read irq status */
	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", status);

	if (status & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];
		rx_ring->dim.event_ctr++;

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	if (status & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		wake_up(&priv->wq);
	}

	/* all other interrupts of interest are handled in the bottom half */
	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
	if (status) {
		/* Save irq status for bottom-half processing. */
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq0_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->bcmgenet_irq_work);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
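/* Example of the register packing above: for the address
 * 00:0a:f7:12:34:56, UMAC_MAC0 ends up holding 0x000af712 (first four
 * octets in big-endian order) and UMAC_MAC1 holds 0x00003456 (the last
 * two octets).
 */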
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
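/* The returned value is exactly the set of bits cleared above (the ring 16
 * buffer-enable bit plus DMA_EN), so callers can hand it straight back to
 * bcmgenet_enable_dma() below to restore the previous DMA state.
 */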
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}
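/* The hfb_filter_cnt / 4 loop in bcmgenet_hfb_clear() relies on the V3+
 * layout in which each 32-bit HFB_FLT_LEN register packs four per-filter
 * length fields; clearing hfb_filter_cnt / 4 registers therefore zeroes
 * every filter length. V1/V2 have no HFB at all, hence the early return
 * in bcmgenet_hfb_init().
 */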
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	bcmgenet_enable_tx_napi(priv);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(dev->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_dma_teardown(priv);
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	bcmgenet_disable_tx_napi(priv);
	netif_tx_disable(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	bcmgenet_dma_teardown(priv);

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	phy_stop(dev->phydev);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(dev->phydev);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}
static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock(&ring->lock);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock(&ring->lock);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	netif_trans_update(dev);

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
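/* The timeout handler is deliberately conservative: it dumps ring state,
 * reclaims descriptors the hardware has already completed, unmasks the Tx
 * interrupts in case a completion interrupt was lost, and wakes the
 * queues. It does not reset the hardware.
 */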
#define MAX_MDF_FILTER	17

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i)
{
	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	*i += 2;
}
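/* Each filter entry spans two consecutive MDF registers: the first holds
 * the top 16 bits of the MAC address and the second the remaining 32 bits,
 * which is why the caller's index advances by two per programmed address.
 */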
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, nfilter;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Number of filters needed */
	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;

	/*
	 * Turn on promiscuous mode for three scenarios
	 * 1. IFF_PROMISC flag is set
	 * 2. IFF_ALLMULTI flag is set
	 * 3. The number of filters needed exceeds the number of filters
	 *    supported by the hardware.
	 */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    (nfilter > MAX_MDF_FILTER)) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* update MDF filter */
	i = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

	/* Unicast */
	netdev_for_each_uc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Multicast */
	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Enable filters */
	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
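/* Filter enable bits are allocated from the top of the 17-entry bank. For
 * example, with no unicast or multicast entries, nfilter is 2 (broadcast
 * plus our own address) and GENMASK(16, 15) enables exactly the two
 * filters programmed above.
 */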
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long tx_bytes = 0, tx_packets = 0;
	unsigned long rx_bytes = 0, rx_packets = 0;
	unsigned long rx_errors = 0, rx_dropped = 0;
	struct bcmgenet_tx_ring *tx_ring;
	struct bcmgenet_rx_ring *rx_ring;
	unsigned int q;

	for (q = 0; q < priv->hw_params->tx_queues; q++) {
		tx_ring = &priv->tx_rings[q];
		tx_bytes += tx_ring->bytes;
		tx_packets += tx_ring->packets;
	}
	tx_ring = &priv->tx_rings[DESC_INDEX];
	tx_bytes += tx_ring->bytes;
	tx_packets += tx_ring->packets;

	for (q = 0; q < priv->hw_params->rx_queues; q++) {
		rx_ring = &priv->rx_rings[q];

		rx_bytes += rx_ring->bytes;
		rx_packets += rx_ring->packets;
		rx_errors += rx_ring->errors;
		rx_dropped += rx_ring->dropped;
	}
	rx_ring = &priv->rx_rings[DESC_INDEX];
	rx_bytes += rx_ring->bytes;
	rx_packets += rx_ring->packets;
	rx_errors += rx_ring->errors;
	rx_dropped += rx_ring->dropped;

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_errors = rx_errors;
	dev->stats.rx_missed_errors = rx_errors;
	dev->stats.rx_dropped = rx_dropped;

	return &dev->stats;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_set_features	= bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
	.ndo_get_stats		= bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V5] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};
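/* Every entry leaves rx_queues at 0, so by default all received traffic
 * lands on the ring 16 descriptor queue; the Rx priority rings only come
 * into play when the HFB is programmed to steer flows to them (see
 * bcmgenet_init_rx_queues() above).
 */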
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
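/* Example of the GPHY revision heuristic above: a register value of 0x0040
 * matches the old scheme ((gphy_rev & 0xf0) != 0) and is shifted up to
 * 0x4000 so the PHY driver sees the major revision in bits 15:8, while
 * 0x1002 (rev G0, patch level 2) already uses the new layout and is stored
 * unchanged.
 */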
struct bcmgenet_plat_data {
	enum bcmgenet_version version;
	u32 dma_max_burst_length;
};

static const struct bcmgenet_plat_data v1_plat_data = {
	.version = GENET_V1,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v2_plat_data = {
	.version = GENET_V2,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v3_plat_data = {
	.version = GENET_V3,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v4_plat_data = {
	.version = GENET_V4,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v5_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data bcm2711_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = 0x08,
};

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcmgenet_plat_data *pdata;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	unsigned int i;
	int err = -EIO;
	const char *phy_mode_str;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	if (priv->irq0 < 0) {
		err = priv->irq0;
		goto err;
	}
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq1 < 0) {
		err = priv->irq1;
		goto err;
	}
	priv->wol_irq = platform_get_irq_optional(pdev, 2);

	if (dn)
		macaddr = of_get_mac_address(dn);
	else
		macaddr = pd->mac_address;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->lock);

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set default features */
	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			 NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;

	if (of_id) {
		pdata = of_id->data;
		priv->version = pdata->version;
		priv->dma_max_burst_length = pdata->dma_max_burst_length;
	} else {
		priv->version = pd->genet_version;
		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
	}

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
		priv->clk = NULL;
	}

	clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err;

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
		priv->clk_wol = NULL;
	}

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
	    !strcasecmp(phy_mode_str, "internal"))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	reset_umac(priv);

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based Tx queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* Set default coalescing parameters */
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

static void bcmgenet_shutdown(struct platform_device *pdev)
{
	bcmgenet_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(dev->phydev);

	/* Speed settings must be restored */
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);

	/* Restore enabled features */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmgenet_netif_stop(dev);

	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	if (ret)
		bcmgenet_resume(d);

	return ret;
}
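/* If powering down for suspend fails, bcmgenet_resume() is invoked to roll
 * the interface back to a fully running state before the error is
 * returned, so the device is never left half-suspended.
 */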
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.shutdown = bcmgenet_shutdown,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");