// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2020 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32
/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
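
/* Worked example of the layout above (illustrative, assuming the GENET v4
 * case of words_per_bd = 3 and TOTAL_DESC = 256): each descriptor occupies
 * 3 * sizeof(u32) = 12 bytes, so the descriptor array spans 256 * 12 =
 * 0xC00 bytes and the TDMA/RDMA control registers begin at
 * tdma_offset/rdma_offset + 0xC00, which is exactly what
 * GENET_TDMA_REG_OFF/GENET_RDMA_REG_OFF compute.
 */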
/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}
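
/* Illustrative use of the combined setter above (a sketch mirroring the
 * xmit path later in this file): the address word is written first and
 * the length/status word second, e.g.
 *
 *	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
 *		    (size << DMA_BUFLENGTH_SHIFT) | DMA_SOP | DMA_EOP);
 *
 * keeping the length/status write last, after the buffer address is
 * already in place.
 */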
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
/* GENET v4 supports 40-bit pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
	reg |= RBUF_HFB_EN;
	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}
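
/* Worked example for the enable-bit packing above (illustrative): the two
 * HFB_FLT_ENABLE_V3PLUS words cover 64 filters, with filters 0-31 tracked
 * in the second word, hence the "+ (f_index < 32) * sizeof(u32)". So
 * f_index = 5 sets bit 5 of HFB_FLT_ENABLE_V3PLUS + 4, while f_index = 40
 * sets bit 8 (40 % 32) of HFB_FLT_ENABLE_V3PLUS + 0.
 */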
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset, reg, reg1;

	offset = HFB_FLT_ENABLE_V3PLUS;
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
	if (f_index < 32) {
		reg1 &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
	} else {
		reg &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg, offset);
	}
	if (!reg && !reg1) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		reg &= ~RBUF_HFB_EN;
		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
	}
}
static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}
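
/* Worked example for the nibble packing above (illustrative): each
 * DMA_INDEX2RING_n register maps eight filters, four bits per filter.
 * For f_index = 10, offset = 10 / 8 = 1 selects DMA_INDEX2RING_1 and the
 * rx_queue nibble lands at bit position 4 * (10 % 8) = 8.
 */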
static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}
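
/* Worked example for the length packing above (illustrative, assuming
 * hfb_filter_cnt = 48): lengths are stored one byte per filter, four to a
 * register, counting down from the last filter. For f_index = 47,
 * (48 - 1 - 47) / 4 = 0 selects the first length register and the byte
 * lane is 8 * (47 % 4) = 24, i.e. the top byte of that word.
 */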
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define VALIDATE_MASK(x) \
	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
				    u32 offset, void *val, void *mask,
				    size_t size)
{
	u32 index, tmp;

	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

	while (size--) {
		if (offset++ & 1) {
			tmp &= ~0x300FF;
			tmp |= (*(unsigned char *)val++);
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0x30000;
				break;
			case 0xF0:
				tmp |= 0x20000;
				break;
			case 0x0F:
				tmp |= 0x10000;
				break;
			}
			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
			if (size)
				tmp = bcmgenet_hfb_readl(priv,
							 index * sizeof(u32));
		} else {
			tmp &= ~0xCFF00;
			tmp |= (*(unsigned char *)val++) << 8;
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0xC0000;
				break;
			case 0xF0:
				tmp |= 0x80000;
				break;
			case 0x0F:
				tmp |= 0x40000;
				break;
			}
			if (!size)
				bcmgenet_hfb_writel(priv, tmp,
						    index * sizeof(u32));
		}
	}

	return 0;
}
static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
					     struct bcmgenet_rxnfc_rule *rule)
{
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	u32 offset = 0, f_length = 0, f;
	u8 val_8, mask_8;
	__be16 val_16;
	u16 mask_16;
	size_t size;

	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
			bcmgenet_hfb_insert_data(priv, f, 12,
						 &fs->h_ext.vlan_etype,
						 &fs->m_ext.vlan_etype,
						 sizeof(fs->h_ext.vlan_etype));
			bcmgenet_hfb_insert_data(priv, f, 14,
						 &fs->h_ext.vlan_tci,
						 &fs->m_ext.vlan_tci,
						 sizeof(fs->h_ext.vlan_tci));
			offset += VLAN_HLEN;
			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
		}
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));
		break;
	case IP_USER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
		/* Specify IP Ether Type */
		val_16 = htons(ETH_P_IP);
		mask_16 = 0xFFFF;
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
					 &val_8, &mask_8,
					 sizeof(val_8));
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
		bcmgenet_hfb_insert_data(priv, f,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 size);
		f_length += DIV_ROUND_UP(size, 2);
		break;
	}

	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		/* Ring 0 flows can be handled by the default Descriptor Ring
		 * We'll map them to ring 0, but don't enable the filter
		 */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
	} else {
		/* Other Rx rings are direct mapped here */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
							 fs->ring_cookie - 1);
		bcmgenet_hfb_enable_filter(priv, f);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
	}
}
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 base, i;

	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
		bcmgenet_hfb_clear_filter(priv, i);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->rxnfc_list);
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	}

	bcmgenet_hfb_clear(priv);
}
static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}
static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}
static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}
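
/* Worked example for the conversion above (illustrative): one DMA timeout
 * tick is 1024 / 125 MHz = 8.192 us, so DIV_ROUND_UP(usecs * 1000, 8192)
 * turns microseconds into ticks, rounding up. A request of usecs = 50
 * yields DIV_ROUND_UP(50000, 8192) = 7 ticks, i.e. ~57.3 us of actual
 * interrupt moderation.
 */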
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;
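
	/* Illustrative bound implied by the check above: with 16 bits of
	 * DMA_TIMEOUT_MASK and ~8.192 us per tick the hardware ceiling is
	 * roughly 0xFFFF * 8.192 us ~= 536 ms; the "(DMA_TIMEOUT_MASK * 8)
	 * + 1" form is a conservative integer approximation of that limit.
	 */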
	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}
static void bcmgenet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv;
	u32 umac_cmd;

	priv = netdev_priv(dev);

	epause->autoneg = priv->autoneg_pause;

	if (netif_carrier_ok(dev)) {
		/* report active state when link is up */
		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
	} else {
		/* otherwise report stored settings */
		epause->tx_pause = priv->tx_pause;
		epause->rx_pause = priv->rx_pause;
	}
}
static int bcmgenet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!dev->phydev)
		return -ENODEV;

	if (!phy_validate_pause(dev->phydev, epause))
		return -EINVAL;

	priv->autoneg_pause = !!epause->autoneg;
	priv->tx_pause = !!epause->tx_pause;
	priv->rx_pause = !!epause->rx_pause;

	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

	return 0;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
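
/* Illustrative walk of the offsets above: a BCMGENET_STAT_MIB_RX counter
 * is read at UMAC_MIB_START + j; a ..._MIB_TX counter adds one 0xC gap; a
 * ..._RUNT counter adds two. The running byte offset j advances by each
 * counter's stat_sizeof, which is why the table order below must match the
 * hardware layout exactly.
 */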
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, false);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}
static int bcmgenet_validate_flow(struct net_device *dev,
				  struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
	    cmd->fs.location != RX_CLS_LOC_ANY) {
		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
			   cmd->fs.location);
		return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case IP_USER_FLOW:
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
			   cmd->fs.flow_type);
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
		}
	}

	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int bcmgenet_insert_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *loc_rule;
	int err, i;

	if (priv->hw_params->hfb_filter_size < 128) {
		netdev_err(dev, "rxnfc: Not supported by this device\n");
		return -EINVAL;
	}

	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
			   cmd->fs.ring_cookie);
		return -EINVAL;
	}

	err = bcmgenet_validate_flow(dev, cmd);
	if (err)
		return err;

	if (cmd->fs.location == RX_CLS_LOC_ANY) {
		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
			cmd->fs.location = loc_rule->fs.location;
			err = memcmp(&loc_rule->fs, &cmd->fs,
				     sizeof(struct ethtool_rx_flow_spec));
			if (!err)
				/* rule exists so return current location */
				return 0;
		}
		for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
			loc_rule = &priv->rxnfc_rules[i];
			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
				cmd->fs.location = i;
				break;
			}
		}
		if (i == MAX_NUM_OF_FS_RULES) {
			cmd->fs.location = RX_CLS_LOC_ANY;
			return -ENOSPC;
		}
	} else {
		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
	}
	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&loc_rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memcpy(&loc_rule->fs, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

	list_add_tail(&loc_rule->list, &priv->rxnfc_list);

	return 0;
}
static int bcmgenet_delete_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[cmd->fs.location];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
		err = -ENOENT;
		goto out;
	}

	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

out:
	return err;
}
static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = bcmgenet_insert_flow(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = bcmgenet_delete_flow(dev, cmd);
		break;
	default:
		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
			    cmd->cmd);
		return -EINVAL;
	}

	return err;
}
static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     int loc)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[loc];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
		err = -ENOENT;
	else
		memcpy(&cmd->fs, &rule->fs,
		       sizeof(struct ethtool_rx_flow_spec));

	return err;
}
static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
{
	struct list_head *pos;
	int res = 0;

	list_for_each(pos, &priv->rxnfc_list)
		res++;

	return res;
}
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			      u32 *rule_locs)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;
	int i = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->hw_params->rx_queues ?: 1;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		list_for_each_entry(rule, &priv->rxnfc_list, list)
			if (i < cmd->rule_cnt)
				rule_locs[i++] = rule->fs.location;
		cmd->rule_cnt = i;
		cmd->data = MAX_NUM_OF_FS_RULES;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_rxnfc		= bcmgenet_get_rxnfc,
	.set_rxnfc		= bcmgenet_set_rxnfc,
	.get_pauseparam		= bcmgenet_get_pauseparam,
	.set_pauseparam		= bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv) && !priv->ephy_16nm)
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
			 EXT_ENERGY_DET_MASK);
		if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}
/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
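
	/* Illustrative wraparound case for the index arithmetic above: the
	 * hardware consumer index is a free-running 16-bit counter, so with
	 * ring->c_index = 0xFFFE and c_index = 0x0003 the subtraction
	 * (0x0003 - 0xFFFE) & DMA_C_INDEX_MASK = 5 still yields the correct
	 * number of newly completed buffers.
	 */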
	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
					struct sk_buff *skb)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	__be16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		if (!new_skb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = new_skb;
		priv->mib.tx_realloc_tsb++;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			/* don't use UDP flag */
			ip_proto = 0;
			break;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
			       (offset + skb->csum_offset) |
			       STATUS_TX_CSUM_LV;

		/* Set the special UDP flag for UDP */
		if (ip_proto == IPPROTO_UDP)
			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
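
/* Worked example of the TSB checksum fields above (illustrative): for a
 * TCP packet over a plain IPv4 header, the checksum start sits at
 * ETH_HLEN + 20 = 34 bytes into the frame. After subtracting the 64-byte
 * TSB just pushed in front, offset = 34 goes into the start field and
 * offset + skb->csum_offset = 34 + 16 = 50 names the byte position where
 * the hardware must store the computed checksum.
 */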
static void bcmgenet_hide_tsb(struct sk_buff *skb)
{
	__skb_pull(skb, sizeof(struct status_64));
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 * queue_mapping = 5, goes to ring 4.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* add the Transmit Status Block */
	skb = bcmgenet_add_tsb(dev, skb);
	if (!skb) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	for (i = 0; i <= nr_frags; i++) {
		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

		BUG_ON(!tx_cb_ptr);

		if (!i) {
			/* Transmit single SKB or head of fragment list */
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			size = skb_headlen(skb);
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			/* xmit fragment */
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		ret = dma_mapping_error(kdev, mapping);
		if (ret) {
			priv->mib.tx_dma_failed++;
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
			ret = NETDEV_TX_OK;
			goto out_unmap_frags;
		}
		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
		dma_unmap_len_set(tx_cb_ptr, dma_len, size);

		tx_cb_ptr->skb = skb;

		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
		 * will need to restore software padding of "runt" packets
		 */
		if (!i) {
			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				len_stat |= DMA_TX_DO_CSUM;
		}
		if (i == nr_frags)
			len_stat |= DMA_EOP;

		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	}

	GENET_CB(skb)->last_cb = tx_cb_ptr;

	bcmgenet_hide_tsb(skb);
	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock(&ring->lock);

	return ret;

out_unmap_frags:
	/* Back up for failed control block mapping */
	bcmgenet_put_txcb(priv, ring);

	/* Unmap successfully mapped control blocks */
	while (i-- > 0) {
		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
	}

	dev_kfree_skb(skb);
	goto out;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
2233 /* bcmgenet_desc_rx - descriptor based rx process.
2234 * this could be called from bottom half, or from NAPI polling method.
2236 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2237 unsigned int budget)
2239 struct bcmgenet_priv *priv = ring->priv;
2240 struct net_device *dev = priv->dev;
2242 struct sk_buff *skb;
2243 u32 dma_length_status;
2244 unsigned long dma_flag;
2246 unsigned int rxpktprocessed = 0, rxpkttoprocess;
2247 unsigned int bytes_processed = 0;
2248 unsigned int p_index, mask;
2249 unsigned int discards;
2251 /* Clear status before servicing to reduce spurious interrupts */
2252 if (ring->index == DESC_INDEX) {
2253 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
2256 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2257 bcmgenet_intrl2_1_writel(priv,
2262 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2264 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
2265 DMA_P_INDEX_DISCARD_CNT_MASK;
2266 if (discards > ring->old_discards) {
2267 discards = discards - ring->old_discards;
2268 ring->errors += discards;
2269 ring->old_discards += discards;
2271 /* Clear HW register when we reach 75% of maximum 0xFFFF */
2272 if (ring->old_discards >= 0xC000) {
2273 ring->old_discards = 0;
2274 bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2279 p_index &= DMA_P_INDEX_MASK;
2280 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
2282 netif_dbg(priv, rx_status, dev,
2283 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2285 while ((rxpktprocessed < rxpkttoprocess) &&
2286 (rxpktprocessed < budget)) {
2287 struct status_64 *status;
2290 cb = &priv->rx_cbs[ring->read_ptr];
2291 skb = bcmgenet_rx_refill(priv, cb);
2293 if (unlikely(!skb)) {
2298 status = (struct status_64 *)skb->data;
2299 dma_length_status = status->length_status;
2300 if (dev->features & NETIF_F_RXCSUM) {
2301 rx_csum = (__force __be16)(status->rx_csum & 0xffff);
2303 skb->csum = (__force __wsum)ntohs(rx_csum);
2304 skb->ip_summed = CHECKSUM_COMPLETE;
2308 /* DMA flags and length are still valid no matter how
2309 * we got the Receive Status Vector (64B RSB or register)
2311 dma_flag = dma_length_status & 0xffff;
2312 len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
2314 netif_dbg(priv, rx_status, dev,
2315 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
2316 __func__, p_index, ring->c_index,
2317 ring->read_ptr, dma_length_status);
2319 if (unlikely(len > RX_BUF_LENGTH)) {
2320 netif_err(priv, rx_status, dev, "oversized packet\n");
2321 dev->stats.rx_length_errors++;
2322 dev->stats.rx_errors++;
2323 dev_kfree_skb_any(skb);
2327 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
2328 netif_err(priv, rx_status, dev,
2329 "dropping fragmented packet!\n");
2331 dev_kfree_skb_any(skb);
2336 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
2337 DMA_RX_OV |
2338 DMA_RX_NO |
2339 DMA_RX_LG |
2340 DMA_RX_RXER))) {
2341 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
2342 (unsigned int)dma_flag);
2343 if (dma_flag & DMA_RX_CRC_ERROR)
2344 dev->stats.rx_crc_errors++;
2345 if (dma_flag & DMA_RX_OV)
2346 dev->stats.rx_over_errors++;
2347 if (dma_flag & DMA_RX_NO)
2348 dev->stats.rx_frame_errors++;
2349 if (dma_flag & DMA_RX_LG)
2350 dev->stats.rx_length_errors++;
2351 dev->stats.rx_errors++;
2352 dev_kfree_skb_any(skb);
2354 } /* error packet */
2358 /* remove the RSB and the 2 bytes the hardware added for IP alignment */
2359 skb_pull(skb, 66);
2360 len -= 66;
2362 if (priv->crc_fwd_en) {
2363 skb_trim(skb, len - ETH_FCS_LEN);
2364 len -= ETH_FCS_LEN;
2365 }
2367 bytes_processed += len;
2369 /* Finish setting up the received SKB and send it to the kernel */
2370 skb->protocol = eth_type_trans(skb, priv->dev);
2373 if (dma_flag & DMA_RX_MULT)
2374 dev->stats.multicast++;
2377 napi_gro_receive(&ring->napi, skb);
2378 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2382 if (likely(ring->read_ptr < ring->end_ptr))
2383 ring->read_ptr++;
2384 else
2385 ring->read_ptr = ring->cb_ptr;
2387 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2388 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2391 ring->dim.bytes = bytes_processed;
2392 ring->dim.packets = rxpktprocessed;
2394 return rxpktprocessed;
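/* Illustrative sketch (not part of the driver): the producer/consumer
 * indices above are free-running 16-bit hardware counters, so the number
 * of packets awaiting service is computed modulo 2^16 and stays correct
 * across wraparound, assuming DMA_C_INDEX_MASK == 0xffff.
 */
static inline unsigned int bcmgenet_ring_pending_example(unsigned int p_index,
							 unsigned int c_index)
{
	/* e.g. p_index = 0x0003 (just wrapped), c_index = 0xfffe:
	 * (0x0003 - 0xfffe) & 0xffff = 5 packets pending
	 */
	return (p_index - c_index) & DMA_C_INDEX_MASK;
}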
2397 /* Rx NAPI polling method */
2398 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
2400 struct bcmgenet_rx_ring *ring = container_of(napi,
2401 struct bcmgenet_rx_ring, napi);
2402 struct dim_sample dim_sample = {};
2403 unsigned int work_done;
2405 work_done = bcmgenet_desc_rx(ring, budget);
2407 if (work_done < budget) {
2408 napi_complete_done(napi, work_done);
2409 ring->int_enable(ring);
2412 if (ring->dim.use_dim) {
2413 dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2414 ring->dim.bytes, &dim_sample);
2415 net_dim(&ring->dim.dim, dim_sample);
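/* NAPI contract illustrated (editorial note): bcmgenet_desc_rx() returning
 * less than the budget means the ring was drained, so napi_complete_done()
 * is called and the ring interrupt is re-enabled; returning the full budget
 * keeps the ring on the poll list with its interrupt still masked.
 */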
2421 static void bcmgenet_dim_work(struct work_struct *work)
2423 struct dim *dim = container_of(work, struct dim, work);
2424 struct bcmgenet_net_dim *ndim =
2425 container_of(dim, struct bcmgenet_net_dim, dim);
2426 struct bcmgenet_rx_ring *ring =
2427 container_of(ndim, struct bcmgenet_rx_ring, dim);
2428 struct dim_cq_moder cur_profile =
2429 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2431 bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2432 dim->state = DIM_START_MEASURE;
2435 /* Assign skb to RX DMA descriptor. */
2436 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
2437 struct bcmgenet_rx_ring *ring)
2440 struct sk_buff *skb;
2443 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2445 /* loop here for each buffer needing assignment */
2446 for (i = 0; i < ring->size; i++) {
2447 cb = ring->cbs + i;
2448 skb = bcmgenet_rx_refill(priv, cb);
2449 if (skb)
2450 dev_consume_skb_any(skb);
2451 if (!skb)
2452 return -ENOMEM;
2453 }
2455 return 0;
2456 }
2458 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
2460 struct sk_buff *skb;
2464 for (i = 0; i < priv->num_rx_bds; i++) {
2465 cb = &priv->rx_cbs[i];
2467 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2468 if (skb)
2469 dev_consume_skb_any(skb);
2473 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
2477 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2478 if (reg & CMD_SW_RESET)
2479 return;
2480 if (enable)
2481 reg |= mask;
2482 else
2483 reg &= ~mask;
2484 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2486 /* UniMAC stops on a packet boundary, so wait for a full-sized packet
2487 * to be processed
2488 */
2489 if (enable == 0)
2490 usleep_range(1000, 2000);
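/* Sizing note for the wait above (illustrative arithmetic): a 1518-byte
 * frame takes ~12 us on the wire at 1Gb/s, ~122 us at 100Mb/s and ~1.2 ms
 * at 10Mb/s, so the 1000-2000 us window covers one full-sized packet
 * draining at any supported link speed.
 */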
2493 static void reset_umac(struct bcmgenet_priv *priv)
2495 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
2496 bcmgenet_rbuf_ctrl_set(priv, 0);
2499 /* issue soft reset and disable MAC while updating its registers */
2500 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
2504 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
2506 /* Mask all interrupts */
2507 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2508 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2509 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2510 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2513 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
2515 u32 int0_enable = 0;
2517 /* Monitor cable plug/unplug events for the internal PHY, the external
2518 * PHY and MoCA
2519 */
2520 if (priv->internal_phy) {
2521 int0_enable |= UMAC_IRQ_LINK_EVENT;
2522 if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
2523 int0_enable |= UMAC_IRQ_PHY_DET_R;
2524 } else if (priv->ext_phy) {
2525 int0_enable |= UMAC_IRQ_LINK_EVENT;
2526 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2527 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
2528 int0_enable |= UMAC_IRQ_LINK_EVENT;
2530 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2533 static void init_umac(struct bcmgenet_priv *priv)
2535 struct device *kdev = &priv->pdev->dev;
2537 u32 int0_enable = 0;
2539 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2543 /* clear tx/rx counter */
2544 bcmgenet_umac_writel(priv,
2545 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
2547 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2549 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2551 /* init tx registers, enable TSB */
2552 reg = bcmgenet_tbuf_ctrl_get(priv);
2553 reg |= TBUF_64B_EN;
2554 bcmgenet_tbuf_ctrl_set(priv, reg);
2556 /* init rx registers, enable IP header optimization and RSB */
2557 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
2558 reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
2559 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2561 /* enable rx checksumming */
2562 reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
2563 reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
2564 /* If UniMAC forwards CRC, we need to skip over it to get
2565 * a valid CHK bit to be set in the per-packet status word
2567 if (priv->crc_fwd_en)
2568 reg |= RBUF_SKIP_FCS;
2569 else
2570 reg &= ~RBUF_SKIP_FCS;
2571 bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
2573 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
2574 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2576 bcmgenet_intr_disable(priv);
2578 /* Configure backpressure vectors for MoCA */
2579 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2580 reg = bcmgenet_bp_mc_get(priv);
2581 reg |= BIT(priv->hw_params->bp_in_en_shift);
2583 /* bp_mask: back pressure mask */
2584 if (netif_is_multiqueue(priv->dev))
2585 reg |= priv->hw_params->bp_in_mask;
2586 else
2587 reg &= ~priv->hw_params->bp_in_mask;
2588 bcmgenet_bp_mc_set(priv, reg);
2591 /* Enable MDIO interrupts on GENET v3+ */
2592 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
2593 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2595 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2597 dev_dbg(kdev, "done init umac\n");
2600 static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2601 void (*cb)(struct work_struct *work))
2603 struct bcmgenet_net_dim *dim = &ring->dim;
2605 INIT_WORK(&dim->dim.work, cb);
2606 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2612 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2614 struct bcmgenet_net_dim *dim = &ring->dim;
2615 struct dim_cq_moder moder;
2618 usecs = ring->rx_coalesce_usecs;
2619 pkts = ring->rx_max_coalesced_frames;
2621 /* If DIM was enabled, re-apply default parameters */
2622 if (dim->use_dim) {
2623 moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2624 usecs = moder.usec;
2625 pkts = moder.pkts;
2626 }
2628 bcmgenet_set_rx_coalesce(ring, usecs, pkts);
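/* Illustrative example of the resulting policy (values hypothetical):
 * usecs = 50, pkts = 1 interrupts after every packet, making the timeout
 * moot; usecs = 50, pkts = 16 interrupts after 16 packets or 50 us after
 * the first one, whichever comes first. With DIM enabled, the defaults
 * from net_dim_get_def_rx_moderation() are only a starting point and
 * bcmgenet_dim_work() re-tunes them as traffic changes.
 */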
2631 /* Initialize a Tx ring along with corresponding hardware registers */
2632 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2633 unsigned int index, unsigned int size,
2634 unsigned int start_ptr, unsigned int end_ptr)
2636 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2637 u32 words_per_bd = WORDS_PER_BD(priv);
2638 u32 flow_period_val = 0;
2640 spin_lock_init(&ring->lock);
2642 ring->index = index;
2643 if (index == DESC_INDEX) {
2644 ring->queue = 0;
2645 ring->int_enable = bcmgenet_tx_ring16_int_enable;
2646 ring->int_disable = bcmgenet_tx_ring16_int_disable;
2647 } else {
2648 ring->queue = index + 1;
2649 ring->int_enable = bcmgenet_tx_ring_int_enable;
2650 ring->int_disable = bcmgenet_tx_ring_int_disable;
2652 ring->cbs = priv->tx_cbs + start_ptr;
2654 ring->clean_ptr = start_ptr;
2656 ring->free_bds = size;
2657 ring->write_ptr = start_ptr;
2658 ring->cb_ptr = start_ptr;
2659 ring->end_ptr = end_ptr - 1;
2660 ring->prod_index = 0;
2662 /* Set flow period for ring != 16 */
2663 if (index != DESC_INDEX)
2664 flow_period_val = ENET_MAX_MTU_SIZE << 16;
2666 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2667 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2668 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2669 /* Disable rate control for now */
2670 bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
2672 bcmgenet_tdma_ring_writel(priv, index,
2673 ((size << DMA_RING_SIZE_SHIFT) |
2674 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2676 /* Set start and end address, read and write pointers */
2677 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2679 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2681 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2683 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2686 /* Initialize Tx NAPI */
2687 netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
2690 /* Initialize a RDMA ring */
2691 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
2692 unsigned int index, unsigned int size,
2693 unsigned int start_ptr, unsigned int end_ptr)
2695 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2696 u32 words_per_bd = WORDS_PER_BD(priv);
2700 ring->index = index;
2701 if (index == DESC_INDEX) {
2702 ring->int_enable = bcmgenet_rx_ring16_int_enable;
2703 ring->int_disable = bcmgenet_rx_ring16_int_disable;
2704 } else {
2705 ring->int_enable = bcmgenet_rx_ring_int_enable;
2706 ring->int_disable = bcmgenet_rx_ring_int_disable;
2708 ring->cbs = priv->rx_cbs + start_ptr;
2711 ring->read_ptr = start_ptr;
2712 ring->cb_ptr = start_ptr;
2713 ring->end_ptr = end_ptr - 1;
2715 ret = bcmgenet_alloc_rx_buffers(priv, ring);
2719 bcmgenet_init_dim(ring, bcmgenet_dim_work);
2720 bcmgenet_init_rx_coalesce(ring);
2722 /* Initialize Rx NAPI */
2723 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
2725 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2726 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2727 bcmgenet_rdma_ring_writel(priv, index,
2728 ((size << DMA_RING_SIZE_SHIFT) |
2729 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2730 bcmgenet_rdma_ring_writel(priv, index,
2731 (DMA_FC_THRESH_LO <<
2732 DMA_XOFF_THRESHOLD_SHIFT) |
2733 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2735 /* Set start and end address, read and write pointers */
2736 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2738 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2740 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2742 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2748 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2751 struct bcmgenet_tx_ring *ring;
2753 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2754 ring = &priv->tx_rings[i];
2755 napi_enable(&ring->napi);
2756 ring->int_enable(ring);
2759 ring = &priv->tx_rings[DESC_INDEX];
2760 napi_enable(&ring->napi);
2761 ring->int_enable(ring);
2764 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2767 struct bcmgenet_tx_ring *ring;
2769 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2770 ring = &priv->tx_rings[i];
2771 napi_disable(&ring->napi);
2774 ring = &priv->tx_rings[DESC_INDEX];
2775 napi_disable(&ring->napi);
2778 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2781 struct bcmgenet_tx_ring *ring;
2783 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2784 ring = &priv->tx_rings[i];
2785 netif_napi_del(&ring->napi);
2788 ring = &priv->tx_rings[DESC_INDEX];
2789 netif_napi_del(&ring->napi);
2792 /* Initialize Tx queues
2794 * Queues 0-3 are priority-based, each one has 32 descriptors,
2795 * with queue 0 being the highest priority queue.
2797 * Queue 16 is the default Tx queue with
2798 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2800 * The transmit control block pool is then partitioned as follows:
2801 * - Tx queue 0 uses tx_cbs[0..31]
2802 * - Tx queue 1 uses tx_cbs[32..63]
2803 * - Tx queue 2 uses tx_cbs[64..95]
2804 * - Tx queue 3 uses tx_cbs[96..127]
2805 * - Tx queue 16 uses tx_cbs[128..255]
2807 static void bcmgenet_init_tx_queues(struct net_device *dev)
2809 struct bcmgenet_priv *priv = netdev_priv(dev);
2811 u32 dma_ctrl, ring_cfg;
2812 u32 dma_priority[3] = {0, 0, 0};
2814 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2815 dma_enable = dma_ctrl & DMA_EN;
2816 dma_ctrl &= ~DMA_EN;
2817 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2819 dma_ctrl = 0;
2820 ring_cfg = 0;
2822 /* Enable strict priority arbiter mode */
2823 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2825 /* Initialize Tx priority queues */
2826 for (i = 0; i < priv->hw_params->tx_queues; i++) {
2827 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2828 i * priv->hw_params->tx_bds_per_q,
2829 (i + 1) * priv->hw_params->tx_bds_per_q);
2830 ring_cfg |= (1 << i);
2831 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2832 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2833 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2836 /* Initialize Tx default queue 16 */
2837 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2838 priv->hw_params->tx_queues *
2839 priv->hw_params->tx_bds_per_q,
2840 TOTAL_DESC);
2841 ring_cfg |= (1 << DESC_INDEX);
2842 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2843 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2844 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2845 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2847 /* Set Tx queue priorities */
2848 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2849 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2850 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2852 /* Enable Tx queues */
2853 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2855 /* Enable Tx DMA */
2856 if (dma_enable)
2857 dma_ctrl |= DMA_EN;
2858 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
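/* Minimal sketch (not part of the driver) of the control-block
 * partitioning described in the comment above: priority queue q owns
 * tx_cbs[q * tx_bds_per_q .. (q + 1) * tx_bds_per_q - 1], and the
 * default queue 16 owns whatever follows the priority queues.
 */
static inline unsigned int bcmgenet_tx_q_first_cb_example(struct bcmgenet_priv *priv,
							  unsigned int index)
{
	if (index == DESC_INDEX)	/* default queue 16 */
		return priv->hw_params->tx_queues *
		       priv->hw_params->tx_bds_per_q;
	return index * priv->hw_params->tx_bds_per_q;	/* priority queues */
}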
2861 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2864 struct bcmgenet_rx_ring *ring;
2866 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2867 ring = &priv->rx_rings[i];
2868 napi_enable(&ring->napi);
2869 ring->int_enable(ring);
2872 ring = &priv->rx_rings[DESC_INDEX];
2873 napi_enable(&ring->napi);
2874 ring->int_enable(ring);
2877 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2880 struct bcmgenet_rx_ring *ring;
2882 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2883 ring = &priv->rx_rings[i];
2884 napi_disable(&ring->napi);
2885 cancel_work_sync(&ring->dim.dim.work);
2888 ring = &priv->rx_rings[DESC_INDEX];
2889 napi_disable(&ring->napi);
2890 cancel_work_sync(&ring->dim.dim.work);
2893 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2896 struct bcmgenet_rx_ring *ring;
2898 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2899 ring = &priv->rx_rings[i];
2900 netif_napi_del(&ring->napi);
2903 ring = &priv->rx_rings[DESC_INDEX];
2904 netif_napi_del(&ring->napi);
2907 /* Initialize Rx queues
2909 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2910 * used to direct traffic to these queues.
2912 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2914 static int bcmgenet_init_rx_queues(struct net_device *dev)
2916 struct bcmgenet_priv *priv = netdev_priv(dev);
2923 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2924 dma_enable = dma_ctrl & DMA_EN;
2925 dma_ctrl &= ~DMA_EN;
2926 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2928 dma_ctrl = 0;
2929 ring_cfg = 0;
2931 /* Initialize Rx priority queues */
2932 for (i = 0; i < priv->hw_params->rx_queues; i++) {
2933 ret = bcmgenet_init_rx_ring(priv, i,
2934 priv->hw_params->rx_bds_per_q,
2935 i * priv->hw_params->rx_bds_per_q,
2936 (i + 1) *
2937 priv->hw_params->rx_bds_per_q);
2938 if (ret)
2939 return ret;
2941 ring_cfg |= (1 << i);
2942 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2945 /* Initialize Rx default queue 16 */
2946 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2947 priv->hw_params->rx_queues *
2948 priv->hw_params->rx_bds_per_q,
2949 TOTAL_DESC);
2950 if (ret)
2951 return ret;
2953 ring_cfg |= (1 << DESC_INDEX);
2954 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2957 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2959 /* Configure ring as descriptor ring and re-enable DMA if enabled */
2960 if (dma_enable)
2961 dma_ctrl |= DMA_EN;
2962 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
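/* Bit layout worked example (illustrative): with rx_queues = 8 the loop
 * above sets ring_cfg bits 0-7 plus BIT(DESC_INDEX), i.e. 0x100ff, while
 * dma_ctrl collects BIT(1)..BIT(8) plus BIT(DESC_INDEX + 1), assuming
 * DESC_INDEX == 16 and DMA_RING_BUF_EN_SHIFT == 1.
 */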
2967 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2975 /* Disable TDMA to stop more frames from being added to the TX DMA */
2976 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2977 reg &= ~DMA_EN;
2978 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2980 /* Check TDMA status register to confirm TDMA is disabled */
2981 while (timeout++ < DMA_TIMEOUT_VAL) {
2982 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2983 if (reg & DMA_DISABLED)
2989 if (timeout == DMA_TIMEOUT_VAL) {
2990 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2994 /* Wait 10ms for packets to drain from both the Tx and Rx DMA */
2995 usleep_range(10000, 20000);
2998 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2999 reg &= ~DMA_EN;
3000 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3003 /* Check RDMA status register to confirm RDMA is disabled */
3004 while (timeout++ < DMA_TIMEOUT_VAL) {
3005 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
3006 if (reg & DMA_DISABLED)
3012 if (timeout == DMA_TIMEOUT_VAL) {
3013 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
3018 for (i = 0; i < priv->hw_params->rx_queues; i++)
3019 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3020 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3021 reg &= ~dma_ctrl;
3022 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3025 for (i = 0; i < priv->hw_params->tx_queues; i++)
3026 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3027 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3028 reg &= ~dma_ctrl;
3029 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
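/* Teardown ordering note (editorial): TDMA is stopped first so no new
 * frames enter the MAC, the 10 ms sleep lets in-flight Tx and Rx traffic
 * drain, RDMA is stopped next, and only then are the per-ring buffer
 * enable bits cleared on both engines.
 */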
3034 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
3036 struct netdev_queue *txq;
3039 bcmgenet_fini_rx_napi(priv);
3040 bcmgenet_fini_tx_napi(priv);
3042 for (i = 0; i < priv->num_tx_bds; i++)
3043 dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
3046 for (i = 0; i < priv->hw_params->tx_queues; i++) {
3047 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
3048 netdev_tx_reset_queue(txq);
3051 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
3052 netdev_tx_reset_queue(txq);
3054 bcmgenet_free_rx_buffers(priv);
3055 kfree(priv->rx_cbs);
3056 kfree(priv->tx_cbs);
3059 /* bcmgenet_init_dma: initialize DMA rings, control blocks and registers */
3060 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
3066 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
3068 /* Initialize common Rx ring structures */
3069 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
3070 priv->num_rx_bds = TOTAL_DESC;
3071 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
3076 for (i = 0; i < priv->num_rx_bds; i++) {
3077 cb = priv->rx_cbs + i;
3078 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
3081 /* Initialize common TX ring structures */
3082 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
3083 priv->num_tx_bds = TOTAL_DESC;
3084 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
3086 if (!priv->tx_cbs) {
3087 kfree(priv->rx_cbs);
3091 for (i = 0; i < priv->num_tx_bds; i++) {
3092 cb = priv->tx_cbs + i;
3093 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
3097 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
3098 DMA_SCB_BURST_SIZE);
3100 /* Initialize Rx queues */
3101 ret = bcmgenet_init_rx_queues(priv->dev);
3103 netdev_err(priv->dev, "failed to initialize Rx queues\n");
3104 bcmgenet_free_rx_buffers(priv);
3105 kfree(priv->rx_cbs);
3106 kfree(priv->tx_cbs);
3111 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
3112 DMA_SCB_BURST_SIZE);
3114 /* Initialize Tx queues */
3115 bcmgenet_init_tx_queues(priv->dev);
3120 /* Interrupt bottom half */
3121 static void bcmgenet_irq_task(struct work_struct *work)
3123 unsigned int status;
3124 struct bcmgenet_priv *priv = container_of(
3125 work, struct bcmgenet_priv, bcmgenet_irq_work);
3127 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
3129 spin_lock_irq(&priv->lock);
3130 status = priv->irq0_stat;
3131 priv->irq0_stat = 0;
3132 spin_unlock_irq(&priv->lock);
3134 if (status & UMAC_IRQ_PHY_DET_R &&
3135 priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
3136 phy_init_hw(priv->dev->phydev);
3137 genphy_config_aneg(priv->dev->phydev);
3140 /* Link UP/DOWN event */
3141 if (status & UMAC_IRQ_LINK_EVENT)
3142 phy_mac_interrupt(priv->dev->phydev);
3146 /* bcmgenet_isr1: handle Rx and Tx priority queues */
3147 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
3149 struct bcmgenet_priv *priv = dev_id;
3150 struct bcmgenet_rx_ring *rx_ring;
3151 struct bcmgenet_tx_ring *tx_ring;
3152 unsigned int index, status;
3154 /* Read irq status */
3155 status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
3156 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3158 /* clear interrupts */
3159 bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
3161 netif_dbg(priv, intr, priv->dev,
3162 "%s: IRQ=0x%x\n", __func__, status);
3164 /* Check Rx priority queue interrupts */
3165 for (index = 0; index < priv->hw_params->rx_queues; index++) {
3166 if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
3169 rx_ring = &priv->rx_rings[index];
3170 rx_ring->dim.event_ctr++;
3172 if (likely(napi_schedule_prep(&rx_ring->napi))) {
3173 rx_ring->int_disable(rx_ring);
3174 __napi_schedule_irqoff(&rx_ring->napi);
3178 /* Check Tx priority queue interrupts */
3179 for (index = 0; index < priv->hw_params->tx_queues; index++) {
3180 if (!(status & BIT(index)))
3183 tx_ring = &priv->tx_rings[index];
3185 if (likely(napi_schedule_prep(&tx_ring->napi))) {
3186 tx_ring->int_disable(tx_ring);
3187 __napi_schedule_irqoff(&tx_ring->napi);
3194 /* bcmgenet_isr0: handle Rx and Tx default queues plus link and MDIO interrupts */
3195 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
3197 struct bcmgenet_priv *priv = dev_id;
3198 struct bcmgenet_rx_ring *rx_ring;
3199 struct bcmgenet_tx_ring *tx_ring;
3200 unsigned int status;
3201 unsigned long flags;
3203 /* Read irq status */
3204 status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
3205 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3207 /* clear interrupts */
3208 bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
3210 netif_dbg(priv, intr, priv->dev,
3211 "IRQ=0x%x\n", status);
3213 if (status & UMAC_IRQ_RXDMA_DONE) {
3214 rx_ring = &priv->rx_rings[DESC_INDEX];
3215 rx_ring->dim.event_ctr++;
3217 if (likely(napi_schedule_prep(&rx_ring->napi))) {
3218 rx_ring->int_disable(rx_ring);
3219 __napi_schedule_irqoff(&rx_ring->napi);
3223 if (status & UMAC_IRQ_TXDMA_DONE) {
3224 tx_ring = &priv->tx_rings[DESC_INDEX];
3226 if (likely(napi_schedule_prep(&tx_ring->napi))) {
3227 tx_ring->int_disable(tx_ring);
3228 __napi_schedule_irqoff(&tx_ring->napi);
3232 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
3233 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
3237 /* all other interrupts of interest are handled in the bottom half */
3238 status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
3240 /* Save irq status for bottom-half processing. */
3241 spin_lock_irqsave(&priv->lock, flags);
3242 priv->irq0_stat |= status;
3243 spin_unlock_irqrestore(&priv->lock, flags);
3245 schedule_work(&priv->bcmgenet_irq_work);
3251 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
3253 /* Acknowledge the interrupt */
3257 #ifdef CONFIG_NET_POLL_CONTROLLER
3258 static void bcmgenet_poll_controller(struct net_device *dev)
3260 struct bcmgenet_priv *priv = netdev_priv(dev);
3262 /* Invoke the main RX/TX interrupt handler */
3263 disable_irq(priv->irq0);
3264 bcmgenet_isr0(priv->irq0, priv);
3265 enable_irq(priv->irq0);
3267 /* And the interrupt handler for RX/TX priority queues */
3268 disable_irq(priv->irq1);
3269 bcmgenet_isr1(priv->irq1, priv);
3270 enable_irq(priv->irq1);
3274 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
3278 reg = bcmgenet_rbuf_ctrl_get(priv);
3279 reg |= BIT(1);
3280 bcmgenet_rbuf_ctrl_set(priv, reg);
3281 udelay(10);
3283 reg &= ~BIT(1);
3284 bcmgenet_rbuf_ctrl_set(priv, reg);
3288 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
3289 const unsigned char *addr)
3291 bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
3292 bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3295 static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
3296 unsigned char *addr)
3300 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
3301 put_unaligned_be32(addr_tmp, &addr[0]);
3302 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
3303 put_unaligned_be16(addr_tmp, &addr[4]);
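/* Register packing worked example (illustrative): for the address
 * 00:11:22:33:44:55, UMAC_MAC0 holds 0x00112233 (bytes 0-3, big-endian)
 * and the low halfword of UMAC_MAC1 holds 0x4455 (bytes 4-5), matching
 * the get/put_unaligned_be32()/be16() accessors above.
 */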
3306 /* Returns a reusable dma control register value */
3307 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
3314 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3315 for (i = 0; i < priv->hw_params->tx_queues; i++)
3316 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3317 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3318 reg &= ~dma_ctrl;
3319 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3321 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3322 for (i = 0; i < priv->hw_params->rx_queues; i++)
3323 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3324 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3325 reg &= ~dma_ctrl;
3326 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3328 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
3330 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3335 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
3339 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3340 reg |= dma_ctrl;
3341 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3343 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3344 reg |= dma_ctrl;
3345 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3348 static void bcmgenet_netif_start(struct net_device *dev)
3350 struct bcmgenet_priv *priv = netdev_priv(dev);
3352 /* Start the network engine */
3353 bcmgenet_set_rx_mode(dev);
3354 bcmgenet_enable_rx_napi(priv);
3356 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3358 bcmgenet_enable_tx_napi(priv);
3360 /* Monitor link interrupts now */
3361 bcmgenet_link_intr_enable(priv);
3363 phy_start(dev->phydev);
3366 static int bcmgenet_open(struct net_device *dev)
3368 struct bcmgenet_priv *priv = netdev_priv(dev);
3369 unsigned long dma_ctrl;
3372 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3374 /* Turn on the clock */
3375 clk_prepare_enable(priv->clk);
3377 /* If this is an internal GPHY, power it back on now, before UniMAC is
3378 * brought out of reset as absolutely no UniMAC activity is allowed
3380 if (priv->internal_phy)
3381 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3383 /* take MAC out of reset */
3384 bcmgenet_umac_reset(priv);
3388 /* Apply features again in case we changed them while the interface
3389 * was down
3390 */
3391 bcmgenet_set_features(dev, dev->features);
3393 bcmgenet_set_hw_addr(priv, dev->dev_addr);
3395 /* Disable RX/TX DMA and flush TX queues */
3396 dma_ctrl = bcmgenet_dma_disable(priv);
3398 /* Reinitialize TDMA and RDMA and SW housekeeping */
3399 ret = bcmgenet_init_dma(priv);
3401 netdev_err(dev, "failed to initialize DMA\n");
3402 goto err_clk_disable;
3405 /* Always enable ring 16 - descriptor ring */
3406 bcmgenet_enable_dma(priv, dma_ctrl);
3409 bcmgenet_hfb_init(priv);
3411 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
3414 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
3418 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
3421 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
3425 ret = bcmgenet_mii_probe(dev);
3427 netdev_err(dev, "failed to connect to PHY\n");
3431 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
3433 bcmgenet_netif_start(dev);
3435 netif_tx_start_all_queues(dev);
3440 free_irq(priv->irq1, priv);
3442 free_irq(priv->irq0, priv);
3444 bcmgenet_dma_teardown(priv);
3445 bcmgenet_fini_dma(priv);
3447 if (priv->internal_phy)
3448 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3449 clk_disable_unprepare(priv->clk);
3453 static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
3455 struct bcmgenet_priv *priv = netdev_priv(dev);
3457 bcmgenet_disable_tx_napi(priv);
3458 netif_tx_disable(dev);
3460 /* Disable MAC receive */
3461 umac_enable_set(priv, CMD_RX_EN, false);
3463 bcmgenet_dma_teardown(priv);
3465 /* Disable MAC transmit. TX DMA disabled must be done before this */
3466 umac_enable_set(priv, CMD_TX_EN, false);
3469 phy_stop(dev->phydev);
3470 bcmgenet_disable_rx_napi(priv);
3471 bcmgenet_intr_disable(priv);
3473 /* Wait for pending work items to complete. Since interrupts are
3474 * disabled no new work will be scheduled.
3476 cancel_work_sync(&priv->bcmgenet_irq_work);
3479 bcmgenet_tx_reclaim_all(dev);
3480 bcmgenet_fini_dma(priv);
3483 static int bcmgenet_close(struct net_device *dev)
3485 struct bcmgenet_priv *priv = netdev_priv(dev);
3488 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
3490 bcmgenet_netif_stop(dev, false);
3492 /* Really kill the PHY state machine and disconnect from it */
3493 phy_disconnect(dev->phydev);
3495 free_irq(priv->irq0, priv);
3496 free_irq(priv->irq1, priv);
3498 if (priv->internal_phy)
3499 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3501 clk_disable_unprepare(priv->clk);
3506 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3508 struct bcmgenet_priv *priv = ring->priv;
3509 u32 p_index, c_index, intsts, intmsk;
3510 struct netdev_queue *txq;
3511 unsigned int free_bds;
3514 if (!netif_msg_tx_err(priv))
3517 txq = netdev_get_tx_queue(priv->dev, ring->queue);
3519 spin_lock(&ring->lock);
3520 if (ring->index == DESC_INDEX) {
3521 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3522 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
3524 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3525 intmsk = 1 << ring->index;
3527 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3528 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3529 txq_stopped = netif_tx_queue_stopped(txq);
3530 free_bds = ring->free_bds;
3531 spin_unlock(&ring->lock);
3533 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
3534 "TX queue status: %s, interrupts: %s\n"
3535 "(sw)free_bds: %d (sw)size: %d\n"
3536 "(sw)p_index: %d (hw)p_index: %d\n"
3537 "(sw)c_index: %d (hw)c_index: %d\n"
3538 "(sw)clean_p: %d (sw)write_p: %d\n"
3539 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3540 ring->index, ring->queue,
3541 txq_stopped ? "stopped" : "active",
3542 intsts & intmsk ? "enabled" : "disabled",
3543 free_bds, ring->size,
3544 ring->prod_index, p_index & DMA_P_INDEX_MASK,
3545 ring->c_index, c_index & DMA_C_INDEX_MASK,
3546 ring->clean_ptr, ring->write_ptr,
3547 ring->cb_ptr, ring->end_ptr);
3550 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3552 struct bcmgenet_priv *priv = netdev_priv(dev);
3553 u32 int0_enable = 0;
3554 u32 int1_enable = 0;
3557 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3559 for (q = 0; q < priv->hw_params->tx_queues; q++)
3560 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3561 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3563 bcmgenet_tx_reclaim_all(dev);
3565 for (q = 0; q < priv->hw_params->tx_queues; q++)
3566 int1_enable |= (1 << q);
3568 int0_enable = UMAC_IRQ_TXDMA_DONE;
3570 /* Re-enable TX interrupts if disabled */
3571 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3572 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3574 netif_trans_update(dev);
3576 dev->stats.tx_errors++;
3578 netif_tx_wake_all_queues(dev);
3581 #define MAX_MDF_FILTER 17
3583 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3584 const unsigned char *addr,
3587 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3588 UMAC_MDF_ADDR + (*i * 4));
3589 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3590 addr[4] << 8 | addr[5],
3591 UMAC_MDF_ADDR + ((*i + 1) * 4));
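/* MDF packing worked example (illustrative): for 01:23:45:67:89:ab the
 * first UMAC_MDF_ADDR word holds 0x00000123 (bytes 0-1) and the second
 * holds 0x456789ab (bytes 2-5); each filter therefore consumes two
 * register words and the caller's index advances by two per entry.
 */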
3595 static void bcmgenet_set_rx_mode(struct net_device *dev)
3597 struct bcmgenet_priv *priv = netdev_priv(dev);
3598 struct netdev_hw_addr *ha;
3602 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3604 /* Number of filters needed */
3605 nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3608 * Turn on promiscuous mode in three scenarios
3609 * 1. IFF_PROMISC flag is set
3610 * 2. IFF_ALLMULTI flag is set
3611 * 3. The number of filters needed exceeds the number of filters
3612 * supported by the hardware.
3614 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3615 if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3616 (nfilter > MAX_MDF_FILTER)) {
3618 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3619 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3622 reg &= ~CMD_PROMISC;
3623 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3626 /* update MDF filter */
3629 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3630 /* my own address */
3631 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3634 netdev_for_each_uc_addr(ha, dev)
3635 bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3638 netdev_for_each_mc_addr(ha, dev)
3639 bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3641 /* Enable filters */
3642 reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3643 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
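/* Filter-enable mask worked example (illustrative): with
 * MAX_MDF_FILTER = 17, nfilter = 4 (broadcast, own address, one unicast
 * and one multicast entry) yields GENMASK(16, 13), enabling the four
 * most significant filter slots, matching the top-down order in which
 * they were programmed above.
 */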
3646 /* Set the hardware MAC address. */
3647 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3649 struct sockaddr *addr = p;
3651 /* Setting the MAC address at the hardware level is not possible
3652 * without disabling the UniMAC RX/TX enable bits.
3654 if (netif_running(dev))
3655 return -EBUSY;
3657 eth_hw_addr_set(dev, addr->sa_data);
3662 static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3664 struct bcmgenet_priv *priv = netdev_priv(dev);
3665 unsigned long tx_bytes = 0, tx_packets = 0;
3666 unsigned long rx_bytes = 0, rx_packets = 0;
3667 unsigned long rx_errors = 0, rx_dropped = 0;
3668 struct bcmgenet_tx_ring *tx_ring;
3669 struct bcmgenet_rx_ring *rx_ring;
3672 for (q = 0; q < priv->hw_params->tx_queues; q++) {
3673 tx_ring = &priv->tx_rings[q];
3674 tx_bytes += tx_ring->bytes;
3675 tx_packets += tx_ring->packets;
3677 tx_ring = &priv->tx_rings[DESC_INDEX];
3678 tx_bytes += tx_ring->bytes;
3679 tx_packets += tx_ring->packets;
3681 for (q = 0; q < priv->hw_params->rx_queues; q++) {
3682 rx_ring = &priv->rx_rings[q];
3684 rx_bytes += rx_ring->bytes;
3685 rx_packets += rx_ring->packets;
3686 rx_errors += rx_ring->errors;
3687 rx_dropped += rx_ring->dropped;
3689 rx_ring = &priv->rx_rings[DESC_INDEX];
3690 rx_bytes += rx_ring->bytes;
3691 rx_packets += rx_ring->packets;
3692 rx_errors += rx_ring->errors;
3693 rx_dropped += rx_ring->dropped;
3695 dev->stats.tx_bytes = tx_bytes;
3696 dev->stats.tx_packets = tx_packets;
3697 dev->stats.rx_bytes = rx_bytes;
3698 dev->stats.rx_packets = rx_packets;
3699 dev->stats.rx_errors = rx_errors;
3700 dev->stats.rx_missed_errors = rx_errors;
3701 dev->stats.rx_dropped = rx_dropped;
3705 static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
3707 struct bcmgenet_priv *priv = netdev_priv(dev);
3709 if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
3710 priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
3711 return -EOPNOTSUPP;
3713 if (new_carrier)
3714 netif_carrier_on(dev);
3715 else
3716 netif_carrier_off(dev);
3721 static const struct net_device_ops bcmgenet_netdev_ops = {
3722 .ndo_open = bcmgenet_open,
3723 .ndo_stop = bcmgenet_close,
3724 .ndo_start_xmit = bcmgenet_xmit,
3725 .ndo_tx_timeout = bcmgenet_timeout,
3726 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
3727 .ndo_set_mac_address = bcmgenet_set_mac_addr,
3728 .ndo_eth_ioctl = phy_do_ioctl_running,
3729 .ndo_set_features = bcmgenet_set_features,
3730 #ifdef CONFIG_NET_POLL_CONTROLLER
3731 .ndo_poll_controller = bcmgenet_poll_controller,
3733 .ndo_get_stats = bcmgenet_get_stats,
3734 .ndo_change_carrier = bcmgenet_change_carrier,
3737 /* Array of GENET hardware parameters/characteristics */
3738 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3744 .bp_in_en_shift = 16,
3745 .bp_in_mask = 0xffff,
3746 .hfb_filter_cnt = 16,
3748 .hfb_offset = 0x1000,
3749 .rdma_offset = 0x2000,
3750 .tdma_offset = 0x3000,
3758 .bp_in_en_shift = 16,
3759 .bp_in_mask = 0xffff,
3760 .hfb_filter_cnt = 16,
3762 .tbuf_offset = 0x0600,
3763 .hfb_offset = 0x1000,
3764 .hfb_reg_offset = 0x2000,
3765 .rdma_offset = 0x3000,
3766 .tdma_offset = 0x4000,
3768 .flags = GENET_HAS_EXT,
3775 .bp_in_en_shift = 17,
3776 .bp_in_mask = 0x1ffff,
3777 .hfb_filter_cnt = 48,
3778 .hfb_filter_size = 128,
3780 .tbuf_offset = 0x0600,
3781 .hfb_offset = 0x8000,
3782 .hfb_reg_offset = 0xfc00,
3783 .rdma_offset = 0x10000,
3784 .tdma_offset = 0x11000,
3786 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3787 GENET_HAS_MOCA_LINK_DET,
3794 .bp_in_en_shift = 17,
3795 .bp_in_mask = 0x1ffff,
3796 .hfb_filter_cnt = 48,
3797 .hfb_filter_size = 128,
3799 .tbuf_offset = 0x0600,
3800 .hfb_offset = 0x8000,
3801 .hfb_reg_offset = 0xfc00,
3802 .rdma_offset = 0x2000,
3803 .tdma_offset = 0x4000,
3805 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3806 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3813 .bp_in_en_shift = 17,
3814 .bp_in_mask = 0x1ffff,
3815 .hfb_filter_cnt = 48,
3816 .hfb_filter_size = 128,
3818 .tbuf_offset = 0x0600,
3819 .hfb_offset = 0x8000,
3820 .hfb_reg_offset = 0xfc00,
3821 .rdma_offset = 0x2000,
3822 .tdma_offset = 0x4000,
3824 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3825 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3829 /* Infer hardware parameters from the detected GENET version */
3830 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3832 struct bcmgenet_hw_params *params;
3837 if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
3838 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3839 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3840 } else if (GENET_IS_V3(priv)) {
3841 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3842 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3843 } else if (GENET_IS_V2(priv)) {
3844 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3845 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3846 } else if (GENET_IS_V1(priv)) {
3847 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3848 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3851 /* enum genet_version starts at 1 */
3852 priv->hw_params = &bcmgenet_hw_params[priv->version];
3853 params = priv->hw_params;
3855 /* Read GENET HW version */
3856 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3857 major = (reg >> 24 & 0x0f);
3858 if (major == 6 || major == 7)
3859 major = 5;
3860 else if (major == 5)
3861 major = 4;
3862 else if (major == 0)
3863 major = 1;
3864 if (major != priv->version) {
3865 dev_err(&priv->pdev->dev,
3866 "GENET version mismatch, got: %d, configured for: %d\n",
3867 major, priv->version);
3870 /* Print the GENET core version */
3871 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3872 major, (reg >> 16) & 0x0f, reg & 0xffff);
3874 /* Store the integrated PHY revision for the MDIO probing function
3875 * to pass this information to the PHY driver. The PHY driver expects
3876 * to find the PHY major revision in bits 15:8 while the GENET register
3877 * stores that information in bits 7:0, account for that.
3879 * On newer chips, starting with PHY revision G0, a new scheme is
3880 * deployed similar to the Starfighter 2 switch with GPHY major
3881 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3882 * is reserved, as is the special value 0x01ff, so we have a small
3883 * heuristic to check for the new GPHY revision and re-arrange things
3884 * so the GPHY driver is happy.
3886 gphy_rev = reg & 0xffff;
3888 if (GENET_IS_V5(priv)) {
3889 /* The EPHY revision should come from the MDIO registers of
3890 * the PHY, not from GENET.
3892 if (gphy_rev != 0) {
3893 pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3896 /* This is reserved, so it requires special treatment */
3897 } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3898 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3900 /* This is the good old scheme: just the GPHY major, no minor or patch */
3901 } else if ((gphy_rev & 0xf0) != 0) {
3902 priv->gphy_rev = gphy_rev << 8;
3903 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3904 } else if ((gphy_rev & 0xff00) != 0) {
3905 priv->gphy_rev = gphy_rev;
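/* Worked examples of the heuristic above (illustrative): gphy_rev = 0x0052
 * has a non-zero 0xf0 nibble, matches the old scheme and is stored shifted
 * as priv->gphy_rev = 0x5200; gphy_rev = 0x1101 has a zero 0xf0 nibble but
 * a non-zero major byte, matches the new scheme and is stored unchanged.
 */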
3908 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3909 if (!(params->flags & GENET_HAS_40BITS))
3910 pr_warn("GENET does not support 40-bits PA\n");
3913 pr_debug("Configuration for version: %d\n"
3914 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3915 "BP << en: %2d, BP msk: 0x%05x\n"
3916 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3917 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3918 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3921 params->tx_queues, params->tx_bds_per_q,
3922 params->rx_queues, params->rx_bds_per_q,
3923 params->bp_in_en_shift, params->bp_in_mask,
3924 params->hfb_filter_cnt, params->qtag_mask,
3925 params->tbuf_offset, params->hfb_offset,
3926 params->hfb_reg_offset,
3927 params->rdma_offset, params->tdma_offset,
3928 params->words_per_bd);
3931 struct bcmgenet_plat_data {
3932 enum bcmgenet_version version;
3933 u32 dma_max_burst_length;
3937 static const struct bcmgenet_plat_data v1_plat_data = {
3938 .version = GENET_V1,
3939 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3942 static const struct bcmgenet_plat_data v2_plat_data = {
3943 .version = GENET_V2,
3944 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3947 static const struct bcmgenet_plat_data v3_plat_data = {
3948 .version = GENET_V3,
3949 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3952 static const struct bcmgenet_plat_data v4_plat_data = {
3953 .version = GENET_V4,
3954 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3957 static const struct bcmgenet_plat_data v5_plat_data = {
3958 .version = GENET_V5,
3959 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3962 static const struct bcmgenet_plat_data bcm2711_plat_data = {
3963 .version = GENET_V5,
3964 .dma_max_burst_length = 0x08,
3967 static const struct bcmgenet_plat_data bcm7712_plat_data = {
3968 .version = GENET_V5,
3969 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3970 .ephy_16nm = true,
3973 static const struct of_device_id bcmgenet_match[] = {
3974 { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3975 { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3976 { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3977 { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3978 { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3979 { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3980 { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
3983 MODULE_DEVICE_TABLE(of, bcmgenet_match);
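/* A representative device tree node matched by the table above (unit
 * address, register size and interrupt specifiers are illustrative, not
 * taken from a real board file):
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x10000>;
 *		interrupts = <0x0 0x10 0x4>, <0x0 0x11 0x4>;
 *	};
 */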
3985 static int bcmgenet_probe(struct platform_device *pdev)
3987 struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3988 const struct bcmgenet_plat_data *pdata;
3989 struct bcmgenet_priv *priv;
3990 struct net_device *dev;
3994 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3995 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3996 GENET_MAX_MQ_CNT + 1);
3998 dev_err(&pdev->dev, "can't allocate net device\n");
4002 priv = netdev_priv(dev);
4003 priv->irq0 = platform_get_irq(pdev, 0);
4004 if (priv->irq0 < 0) {
4008 priv->irq1 = platform_get_irq(pdev, 1);
4009 if (priv->irq1 < 0) {
4013 priv->wol_irq = platform_get_irq_optional(pdev, 2);
4014 if (priv->wol_irq == -EPROBE_DEFER) {
4015 err = priv->wol_irq;
4019 priv->base = devm_platform_ioremap_resource(pdev, 0);
4020 if (IS_ERR(priv->base)) {
4021 err = PTR_ERR(priv->base);
4025 spin_lock_init(&priv->lock);
4027 /* Set default pause parameters */
4028 priv->autoneg_pause = 1;
4032 SET_NETDEV_DEV(dev, &pdev->dev);
4033 dev_set_drvdata(&pdev->dev, dev);
4034 dev->watchdog_timeo = 2 * HZ;
4035 dev->ethtool_ops = &bcmgenet_ethtool_ops;
4036 dev->netdev_ops = &bcmgenet_netdev_ops;
4038 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
4040 /* Set default features */
4041 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
4043 dev->hw_features |= dev->features;
4044 dev->vlan_features |= dev->features;
4046 /* Request the WOL interrupt and advertise suspend if available */
4047 priv->wol_irq_disabled = true;
4048 if (priv->wol_irq > 0) {
4049 err = devm_request_irq(&pdev->dev, priv->wol_irq,
4050 bcmgenet_wol_isr, 0, dev->name, priv);
4052 device_set_wakeup_capable(&pdev->dev, 1);
4055 /* Set the needed headroom to account for any possible
4056 * features being enabled or disabled at runtime
4058 dev->needed_headroom += 64;
4063 pdata = device_get_match_data(&pdev->dev);
4064 if (pdata) {
4065 priv->version = pdata->version;
4066 priv->dma_max_burst_length = pdata->dma_max_burst_length;
4067 priv->ephy_16nm = pdata->ephy_16nm;
4068 } else {
4069 priv->version = pd->genet_version;
4070 priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
4073 priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
4074 if (IS_ERR(priv->clk)) {
4075 dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
4076 err = PTR_ERR(priv->clk);
4080 err = clk_prepare_enable(priv->clk);
4084 bcmgenet_set_hw_params(priv);
4087 if (priv->hw_params->flags & GENET_HAS_40BITS)
4088 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
4090 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4092 goto err_clk_disable;
4094 /* Mii wait queue */
4095 init_waitqueue_head(&priv->wq);
4096 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
4097 priv->rx_buf_len = RX_BUF_LENGTH;
4098 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
4100 priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
4101 if (IS_ERR(priv->clk_wol)) {
4102 dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
4103 err = PTR_ERR(priv->clk_wol);
4104 goto err_clk_disable;
4107 priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
4108 if (IS_ERR(priv->clk_eee)) {
4109 dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
4110 err = PTR_ERR(priv->clk_eee);
4111 goto err_clk_disable;
4114 /* If this is an internal GPHY, power it on now, before UniMAC is
4115 * brought out of reset as absolutely no UniMAC activity is allowed
4117 if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
4118 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4120 if (pd && !IS_ERR_OR_NULL(pd->mac_address))
4121 eth_hw_addr_set(dev, pd->mac_address);
4122 else
4123 if (device_get_ethdev_address(&pdev->dev, dev))
4124 if (has_acpi_companion(&pdev->dev)) {
4125 u8 addr[ETH_ALEN];
4127 bcmgenet_get_hw_addr(priv, addr);
4128 eth_hw_addr_set(dev, addr);
4131 if (!is_valid_ether_addr(dev->dev_addr)) {
4132 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
4133 eth_hw_addr_random(dev);
4138 err = bcmgenet_mii_init(dev);
4140 goto err_clk_disable;
4142 /* set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
4143 * just the ring 16 descriptor-based TX queue)
4144 */
4145 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
4146 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
4148 /* Set default coalescing parameters */
4149 for (i = 0; i < priv->hw_params->rx_queues; i++)
4150 priv->rx_rings[i].rx_max_coalesced_frames = 1;
4151 priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
4153 /* libphy will determine the link state */
4154 netif_carrier_off(dev);
4156 /* Turn off the main clock, WOL clock is handled separately */
4157 clk_disable_unprepare(priv->clk);
4159 err = register_netdev(dev);
4161 bcmgenet_mii_exit(dev);
4168 clk_disable_unprepare(priv->clk);
4174 static int bcmgenet_remove(struct platform_device *pdev)
4176 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
4178 dev_set_drvdata(&pdev->dev, NULL);
4179 unregister_netdev(priv->dev);
4180 bcmgenet_mii_exit(priv->dev);
4181 free_netdev(priv->dev);
4186 static void bcmgenet_shutdown(struct platform_device *pdev)
4188 bcmgenet_remove(pdev);
4191 #ifdef CONFIG_PM_SLEEP
4192 static int bcmgenet_resume_noirq(struct device *d)
4194 struct net_device *dev = dev_get_drvdata(d);
4195 struct bcmgenet_priv *priv = netdev_priv(dev);
4199 if (!netif_running(dev))
4202 /* Turn on the clock */
4203 ret = clk_prepare_enable(priv->clk);
4207 if (device_may_wakeup(d) && priv->wolopts) {
4208 /* Account for Wake-on-LAN events and clear those events
4209 * (Some devices need more time between enabling the clocks
4210 * and the interrupt register reflecting the wake event so
4211 * read the register twice)
4213 reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4214 reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4215 if (reg & UMAC_IRQ_WAKE_EVENT)
4216 pm_wakeup_event(&priv->pdev->dev, 0);
4219 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
4224 static int bcmgenet_resume(struct device *d)
4226 struct net_device *dev = dev_get_drvdata(d);
4227 struct bcmgenet_priv *priv = netdev_priv(dev);
4228 struct bcmgenet_rxnfc_rule *rule;
4229 unsigned long dma_ctrl;
4232 if (!netif_running(dev))
4235 /* From WOL-enabled suspend, switch to regular clock */
4236 if (device_may_wakeup(d) && priv->wolopts)
4237 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
4239 /* If this is an internal GPHY, power it back on now, before UniMAC is
4240 * brought out of reset as absolutely no UniMAC activity is allowed
4242 if (priv->internal_phy)
4243 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4245 bcmgenet_umac_reset(priv);
4249 phy_init_hw(dev->phydev);
4251 /* Speed settings must be restored */
4252 genphy_config_aneg(dev->phydev);
4253 bcmgenet_mii_config(priv->dev, false);
4255 /* Restore enabled features */
4256 bcmgenet_set_features(dev, dev->features);
4258 bcmgenet_set_hw_addr(priv, dev->dev_addr);
4260 /* Restore hardware filters */
4261 bcmgenet_hfb_clear(priv);
4262 list_for_each_entry(rule, &priv->rxnfc_list, list)
4263 if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
4264 bcmgenet_hfb_create_rxnfc_filter(priv, rule);
4266 /* Disable RX/TX DMA and flush TX queues */
4267 dma_ctrl = bcmgenet_dma_disable(priv);
4269 /* Reinitialize TDMA and RDMA and SW housekeeping */
4270 ret = bcmgenet_init_dma(priv);
4272 netdev_err(dev, "failed to initialize DMA\n");
4273 goto out_clk_disable;
4276 /* Always enable ring 16 - descriptor ring */
4277 bcmgenet_enable_dma(priv, dma_ctrl);
4279 if (!device_may_wakeup(d))
4280 phy_resume(dev->phydev);
4282 if (priv->eee.eee_enabled)
4283 bcmgenet_eee_enable_set(dev, true);
4285 bcmgenet_netif_start(dev);
4287 netif_device_attach(dev);
4292 if (priv->internal_phy)
4293 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4294 clk_disable_unprepare(priv->clk);
4298 static int bcmgenet_suspend(struct device *d)
4300 struct net_device *dev = dev_get_drvdata(d);
4301 struct bcmgenet_priv *priv = netdev_priv(dev);
4303 if (!netif_running(dev))
4306 netif_device_detach(dev);
4308 bcmgenet_netif_stop(dev, true);
4310 if (!device_may_wakeup(d))
4311 phy_suspend(dev->phydev);
4313 /* Disable filtering */
4314 bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
4319 static int bcmgenet_suspend_noirq(struct device *d)
4321 struct net_device *dev = dev_get_drvdata(d);
4322 struct bcmgenet_priv *priv = netdev_priv(dev);
4325 if (!netif_running(dev))
4328 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
4329 if (device_may_wakeup(d) && priv->wolopts)
4330 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
4331 else if (priv->internal_phy)
4332 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4334 /* Let the framework handle resumption and leave the clocks on */
4338 /* Turn off the clocks */
4339 clk_disable_unprepare(priv->clk);
4344 #define bcmgenet_suspend NULL
4345 #define bcmgenet_suspend_noirq NULL
4346 #define bcmgenet_resume NULL
4347 #define bcmgenet_resume_noirq NULL
4348 #endif /* CONFIG_PM_SLEEP */
4350 static const struct dev_pm_ops bcmgenet_pm_ops = {
4351 .suspend = bcmgenet_suspend,
4352 .suspend_noirq = bcmgenet_suspend_noirq,
4353 .resume = bcmgenet_resume,
4354 .resume_noirq = bcmgenet_resume_noirq,
4357 static const struct acpi_device_id genet_acpi_match[] = {
4358 { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
4361 MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
4363 static struct platform_driver bcmgenet_driver = {
4364 .probe = bcmgenet_probe,
4365 .remove = bcmgenet_remove,
4366 .shutdown = bcmgenet_shutdown,
4369 .of_match_table = bcmgenet_match,
4370 .pm = &bcmgenet_pm_ops,
4371 .acpi_match_table = genet_acpi_match,
4374 module_platform_driver(bcmgenet_driver);
4376 MODULE_AUTHOR("Broadcom Corporation");
4377 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
4378 MODULE_ALIAS("platform:bcmgenet");
4379 MODULE_LICENSE("GPL");
4380 MODULE_SOFTDEP("pre: mdio-bcm-unimac");