1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
26 #include <net/dst_metadata.h>
28 #include "mtk_eth_soc.h"
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
42 static const struct mtk_reg_map mtk_reg_map = {
43 .tx_irq_mask = 0x1a1c,
44 .tx_irq_status = 0x1a18,
54 .adma_rx_dbg0 = 0x0a38,
67 .tx_sch_rate = 0x1a14,
80 .gdma_to_ppe = 0x4444,
90 static const struct mtk_reg_map mt7628_reg_map = {
91 .tx_irq_mask = 0x0a28,
92 .tx_irq_status = 0x0a20,
100 .irq_status = 0x0a20,
106 static const struct mtk_reg_map mt7986_reg_map = {
107 .tx_irq_mask = 0x461c,
108 .tx_irq_status = 0x4618,
111 .rx_cnt_cfg = 0x6104,
116 .irq_status = 0x6220,
118 .adma_rx_dbg0 = 0x6238,
125 .rx_cnt_cfg = 0x4504,
141 .tx_sch_rate = 0x4798,
144 .gdma_to_ppe = 0x3333,
150 .pse_iq_sta = 0x0180,
151 .pse_oq_sta = 0x01a0,
154 /* strings used by ethtool */
155 static const struct mtk_ethtool_stats {
156 char str[ETH_GSTRING_LEN];
158 } mtk_ethtool_stats[] = {
159 MTK_ETHTOOL_STAT(tx_bytes),
160 MTK_ETHTOOL_STAT(tx_packets),
161 MTK_ETHTOOL_STAT(tx_skip),
162 MTK_ETHTOOL_STAT(tx_collisions),
163 MTK_ETHTOOL_STAT(rx_bytes),
164 MTK_ETHTOOL_STAT(rx_packets),
165 MTK_ETHTOOL_STAT(rx_overflow),
166 MTK_ETHTOOL_STAT(rx_fcs_errors),
167 MTK_ETHTOOL_STAT(rx_short_errors),
168 MTK_ETHTOOL_STAT(rx_long_errors),
169 MTK_ETHTOOL_STAT(rx_checksum_errors),
170 MTK_ETHTOOL_STAT(rx_flow_control_packets),
171 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
172 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
173 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
174 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
175 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
176 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
177 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
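/* How this table is consumed (illustrative sketch, not part of the driver):
 * each MTK_ETHTOOL_STAT()/MTK_ETHTOOL_XDP_STAT() entry pairs an ethtool
 * string with the u64-sized index of the counter inside struct mtk_hw_stats,
 * so the ethtool ->get_ethtool_stats() callback can copy counters with a
 * plain loop.  Assuming an "offset" member name for the second field:
 *
 *	u64 *src = (u64 *)mac->hw_stats;
 *
 *	for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
 *		data[i] = src[mtk_ethtool_stats[i].offset];
 */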
180 static const char * const mtk_clks_source_name[] = {
181 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
182 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
183 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
184 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
189 __raw_writel(val, eth->base + reg);
192 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
194 return __raw_readl(eth->base + reg);
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
201 val = mtk_r32(eth, reg);
204 mtk_w32(eth, val, reg);
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
210 unsigned long t_start = jiffies;
213 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
215 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
220 dev_err(eth->dev, "mdio: MDIO timeout\n");
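/* Equivalent polling with the generic helper from <linux/iopoll.h> (a
 * sketch only, not used by the driver): read_poll_timeout() expresses the
 * same "spin until PHY_IAC_ACCESS clears or we time out" logic as the loop
 * above.  The microsecond timeout below is an assumption standing in for
 * the jiffies-based PHY_IAC_TIMEOUT, and <linux/iopoll.h> would need to be
 * included for this to build.
 */
static int __maybe_unused mtk_mdio_busy_wait_poll(struct mtk_eth *eth)
{
	u32 val;

	return read_poll_timeout(mtk_r32, val, !(val & PHY_IAC_ACCESS),
				 20, USEC_PER_SEC, false, eth, MTK_PHY_IAC);
}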
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
229 ret = mtk_mdio_busy_wait(eth);
233 mtk_w32(eth, PHY_IAC_ACCESS |
236 PHY_IAC_REG(phy_reg) |
237 PHY_IAC_ADDR(phy_addr) |
238 PHY_IAC_DATA(write_data),
241 ret = mtk_mdio_busy_wait(eth);
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
249 u32 devad, u32 phy_reg, u32 write_data)
253 ret = mtk_mdio_busy_wait(eth);
257 mtk_w32(eth, PHY_IAC_ACCESS |
259 PHY_IAC_CMD_C45_ADDR |
261 PHY_IAC_ADDR(phy_addr) |
262 PHY_IAC_DATA(phy_reg),
265 ret = mtk_mdio_busy_wait(eth);
269 mtk_w32(eth, PHY_IAC_ACCESS |
273 PHY_IAC_ADDR(phy_addr) |
274 PHY_IAC_DATA(write_data),
277 ret = mtk_mdio_busy_wait(eth);
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
288 ret = mtk_mdio_busy_wait(eth);
292 mtk_w32(eth, PHY_IAC_ACCESS |
294 PHY_IAC_CMD_C22_READ |
295 PHY_IAC_REG(phy_reg) |
296 PHY_IAC_ADDR(phy_addr),
299 ret = mtk_mdio_busy_wait(eth);
303 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
307 u32 devad, u32 phy_reg)
311 ret = mtk_mdio_busy_wait(eth);
315 mtk_w32(eth, PHY_IAC_ACCESS |
317 PHY_IAC_CMD_C45_ADDR |
319 PHY_IAC_ADDR(phy_addr) |
320 PHY_IAC_DATA(phy_reg),
323 ret = mtk_mdio_busy_wait(eth);
327 mtk_w32(eth, PHY_IAC_ACCESS |
329 PHY_IAC_CMD_C45_READ |
331 PHY_IAC_ADDR(phy_addr),
334 ret = mtk_mdio_busy_wait(eth);
338 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
341 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
342 int phy_reg, u16 val)
344 struct mtk_eth *eth = bus->priv;
346 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
349 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
350 int devad, int phy_reg, u16 val)
352 struct mtk_eth *eth = bus->priv;
354 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
357 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
359 struct mtk_eth *eth = bus->priv;
361 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
364 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
367 struct mtk_eth *eth = bus->priv;
369 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
373 phy_interface_t interface)
377 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
378 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
380 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
381 ETHSYS_TRGMII_MT7621_MASK, val);
386 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
387 phy_interface_t interface, int speed)
393 if (interface == PHY_INTERFACE_MODE_TRGMII) {
394 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
395 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
397 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
401 if (speed == SPEED_1000) {
402 intf = INTF_MODE_RGMII_1000;
404 rck = RCK_CTRL_RGMII_1000;
405 tck = TCK_CTRL_RGMII_1000;
407 intf = INTF_MODE_RGMII_10_100;
409 rck = RCK_CTRL_RGMII_10_100;
410 tck = TCK_CTRL_RGMII_10_100;
413 mtk_w32(eth, intf, INTF_MODE);
415 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
416 ETHSYS_TRGMII_CLK_SEL362_5,
417 ETHSYS_TRGMII_CLK_SEL362_5);
419 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
421 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
423 mtk_w32(eth, rck, TRGMII_RCK_CTRL);
424 mtk_w32(eth, tck, TRGMII_TCK_CTRL);
427 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
428 phy_interface_t interface)
430 struct mtk_mac *mac = container_of(config, struct mtk_mac,
432 struct mtk_eth *eth = mac->hw;
435 if (interface == PHY_INTERFACE_MODE_SGMII ||
436 phy_interface_mode_is_8023z(interface)) {
437 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
440 return mtk_sgmii_select_pcs(eth->sgmii, sid);
446 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
447 const struct phylink_link_state *state)
449 struct mtk_mac *mac = container_of(config, struct mtk_mac,
451 struct mtk_eth *eth = mac->hw;
452 int val, ge_mode, err = 0;
455 /* MT76x8 has no hardware settings for the MAC */
456 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
457 mac->interface != state->interface) {
458 /* Setup soc pin functions */
459 switch (state->interface) {
460 case PHY_INTERFACE_MODE_TRGMII:
461 case PHY_INTERFACE_MODE_RGMII_TXID:
462 case PHY_INTERFACE_MODE_RGMII_RXID:
463 case PHY_INTERFACE_MODE_RGMII_ID:
464 case PHY_INTERFACE_MODE_RGMII:
465 case PHY_INTERFACE_MODE_MII:
466 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
467 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
472 case PHY_INTERFACE_MODE_1000BASEX:
473 case PHY_INTERFACE_MODE_2500BASEX:
474 case PHY_INTERFACE_MODE_SGMII:
475 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
479 case PHY_INTERFACE_MODE_GMII:
480 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
481 err = mtk_gmac_gephy_path_setup(eth, mac->id);
490 /* Setup clock for 1st gmac */
491 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
492 !phy_interface_mode_is_8023z(state->interface) &&
493 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
494 if (MTK_HAS_CAPS(mac->hw->soc->caps,
495 MTK_TRGMII_MT7621_CLK)) {
496 if (mt7621_gmac0_rgmii_adjust(mac->hw,
500 /* FIXME: this is incorrect. Not only does it
501 * use state->speed (which is not guaranteed
502 * to be correct) but it also makes use of it
503 * in a code path that will only be reachable
504 * when the PHY interface mode changes, not
505 * when the speed changes. Consequently, RGMII
506 * is probably broken.
508 mtk_gmac0_rgmii_adjust(mac->hw,
512 /* mt7623_pad_clk_setup */
513 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
515 TD_DM_DRVP(8) | TD_DM_DRVN(8),
518 /* Assert/release MT7623 RXC reset */
519 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
521 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
525 switch (state->interface) {
526 case PHY_INTERFACE_MODE_MII:
527 case PHY_INTERFACE_MODE_GMII:
535 /* put the gmac into the right mode */
536 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
537 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
538 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
539 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
541 mac->interface = state->interface;
545 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
546 phy_interface_mode_is_8023z(state->interface)) {
547 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
550 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
552 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
554 ~(u32)SYSCFG0_SGMII_MASK);
556 /* Save the syscfg0 value for mac_finish */
558 } else if (phylink_autoneg_inband(mode)) {
560 "In-band mode not supported in non SGMII mode!\n");
567 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
568 mac->id, phy_modes(state->interface));
572 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
573 mac->id, phy_modes(state->interface), err);
576 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
577 phy_interface_t interface)
579 struct mtk_mac *mac = container_of(config, struct mtk_mac,
581 struct mtk_eth *eth = mac->hw;
582 u32 mcr_cur, mcr_new;
585 if (interface == PHY_INTERFACE_MODE_SGMII ||
586 phy_interface_mode_is_8023z(interface))
587 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
588 SYSCFG0_SGMII_MASK, mac->syscfg0);
591 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
593 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
594 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
595 MAC_MCR_RX_FIFO_CLR_DIS;
597 /* Only update control register when needed! */
598 if (mcr_new != mcr_cur)
599 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
604 static void mtk_mac_pcs_get_state(struct phylink_config *config,
605 struct phylink_link_state *state)
607 struct mtk_mac *mac = container_of(config, struct mtk_mac,
609 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
611 state->link = (pmsr & MAC_MSR_LINK);
612 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
614 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
616 state->speed = SPEED_10;
618 case MAC_MSR_SPEED_100:
619 state->speed = SPEED_100;
621 case MAC_MSR_SPEED_1000:
622 state->speed = SPEED_1000;
625 state->speed = SPEED_UNKNOWN;
629 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
630 if (pmsr & MAC_MSR_RX_FC)
631 state->pause |= MLO_PAUSE_RX;
632 if (pmsr & MAC_MSR_TX_FC)
633 state->pause |= MLO_PAUSE_TX;
636 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
637 phy_interface_t interface)
639 struct mtk_mac *mac = container_of(config, struct mtk_mac,
641 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
643 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
644 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
647 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
650 const struct mtk_soc_data *soc = eth->soc;
653 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
656 val = MTK_QTX_SCH_MIN_RATE_EN |
657 /* minimum: 10 Mbps */
658 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
659 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
660 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
661 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
662 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
664 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
667 val |= MTK_QTX_SCH_MAX_RATE_EN |
668 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
669 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
670 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
673 val |= MTK_QTX_SCH_MAX_RATE_EN |
674 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
675 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
676 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
679 val |= MTK_QTX_SCH_MAX_RATE_EN |
680 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
681 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
682 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
690 val |= MTK_QTX_SCH_MAX_RATE_EN |
691 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
692 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
693 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
696 val |= MTK_QTX_SCH_MAX_RATE_EN |
697 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
698 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
699 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
702 val |= MTK_QTX_SCH_MAX_RATE_EN |
703 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
704 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
705 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
712 ofs = MTK_QTX_OFFSET * idx;
713 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
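/* Illustrative sketch (not part of the driver): the MAX_RATE programming
 * above looks like a mantissa * 10^exponent encoding of the line rate.
 * Under that assumption, the two fields could be derived generically as
 * below; the exponent ceiling of 7 is an arbitrary illustrative limit.
 */
static u32 __maybe_unused mtk_qtx_sch_max_rate(u32 rate)
{
	u32 exp = 0;

	while (rate > FIELD_MAX(MTK_QTX_SCH_MAX_RATE_MAN) && exp < 7) {
		rate /= 10;
		exp++;
	}

	return MTK_QTX_SCH_MAX_RATE_EN |
	       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, rate) |
	       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, exp);
}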
716 static void mtk_mac_link_up(struct phylink_config *config,
717 struct phy_device *phy,
718 unsigned int mode, phy_interface_t interface,
719 int speed, int duplex, bool tx_pause, bool rx_pause)
721 struct mtk_mac *mac = container_of(config, struct mtk_mac,
725 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
726 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
727 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
728 MAC_MCR_FORCE_RX_FC);
730 /* Configure speed */
734 mcr |= MAC_MCR_SPEED_1000;
737 mcr |= MAC_MCR_SPEED_100;
741 mtk_set_queue_speed(mac->hw, mac->id, speed);
743 /* Configure duplex */
744 if (duplex == DUPLEX_FULL)
745 mcr |= MAC_MCR_FORCE_DPX;
747 /* Configure pause modes - phylink will avoid these for half duplex */
749 mcr |= MAC_MCR_FORCE_TX_FC;
751 mcr |= MAC_MCR_FORCE_RX_FC;
753 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
754 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
757 static const struct phylink_mac_ops mtk_phylink_ops = {
758 .mac_select_pcs = mtk_mac_select_pcs,
759 .mac_pcs_get_state = mtk_mac_pcs_get_state,
760 .mac_config = mtk_mac_config,
761 .mac_finish = mtk_mac_finish,
762 .mac_link_down = mtk_mac_link_down,
763 .mac_link_up = mtk_mac_link_up,
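/* These callbacks are handed to phylink when each MAC is probed: the
 * driver is expected to pass &mtk_phylink_ops, together with the port's
 * phylink_config and firmware node, to phylink_create(), which then calls
 * back into mac_config/mac_finish/mac_link_up as the link state changes.
 * The probe-time call itself lives elsewhere in this file.
 */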
766 static int mtk_mdio_init(struct mtk_eth *eth)
768 struct device_node *mii_np;
771 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
773 dev_err(eth->dev, "no %s child node found", "mdio-bus");
777 if (!of_device_is_available(mii_np)) {
782 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
788 eth->mii_bus->name = "mdio";
789 eth->mii_bus->read = mtk_mdio_read_c22;
790 eth->mii_bus->write = mtk_mdio_write_c22;
791 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
792 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
793 eth->mii_bus->priv = eth;
794 eth->mii_bus->parent = eth->dev;
796 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
797 ret = of_mdiobus_register(eth->mii_bus, mii_np);
804 static void mtk_mdio_cleanup(struct mtk_eth *eth)
809 mdiobus_unregister(eth->mii_bus);
812 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
817 spin_lock_irqsave(&eth->tx_irq_lock, flags);
818 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
819 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
820 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
823 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
828 spin_lock_irqsave(&eth->tx_irq_lock, flags);
829 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
830 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
831 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
834 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
839 spin_lock_irqsave(&eth->rx_irq_lock, flags);
840 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
841 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
842 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
845 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
850 spin_lock_irqsave(&eth->rx_irq_lock, flags);
851 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
852 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
853 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
856 static int mtk_set_mac_address(struct net_device *dev, void *p)
858 int ret = eth_mac_addr(dev, p);
859 struct mtk_mac *mac = netdev_priv(dev);
860 struct mtk_eth *eth = mac->hw;
861 const char *macaddr = dev->dev_addr;
866 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
869 spin_lock_bh(&mac->hw->page_lock);
870 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
871 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
872 MT7628_SDM_MAC_ADRH);
873 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
874 (macaddr[4] << 8) | macaddr[5],
875 MT7628_SDM_MAC_ADRL);
877 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
878 MTK_GDMA_MAC_ADRH(mac->id));
879 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
880 (macaddr[4] << 8) | macaddr[5],
881 MTK_GDMA_MAC_ADRL(mac->id));
883 spin_unlock_bh(&mac->hw->page_lock);
888 void mtk_stats_update_mac(struct mtk_mac *mac)
890 struct mtk_hw_stats *hw_stats = mac->hw_stats;
891 struct mtk_eth *eth = mac->hw;
893 u64_stats_update_begin(&hw_stats->syncp);
895 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
896 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
897 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
898 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
899 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
900 hw_stats->rx_checksum_errors +=
901 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
903 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
904 unsigned int offs = hw_stats->reg_offset;
907 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
908 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
910 hw_stats->rx_bytes += (stats << 32);
911 hw_stats->rx_packets +=
912 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
913 hw_stats->rx_overflow +=
914 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
915 hw_stats->rx_fcs_errors +=
916 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
917 hw_stats->rx_short_errors +=
918 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
919 hw_stats->rx_long_errors +=
920 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
921 hw_stats->rx_checksum_errors +=
922 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
923 hw_stats->rx_flow_control_packets +=
924 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
926 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
927 hw_stats->tx_collisions +=
928 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
929 hw_stats->tx_bytes +=
930 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
931 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
933 hw_stats->tx_bytes += (stats << 32);
934 hw_stats->tx_packets +=
935 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
938 u64_stats_update_end(&hw_stats->syncp);
941 static void mtk_stats_update(struct mtk_eth *eth)
945 for (i = 0; i < MTK_MAC_COUNT; i++) {
946 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
948 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
949 mtk_stats_update_mac(eth->mac[i]);
950 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
955 static void mtk_get_stats64(struct net_device *dev,
956 struct rtnl_link_stats64 *storage)
958 struct mtk_mac *mac = netdev_priv(dev);
959 struct mtk_hw_stats *hw_stats = mac->hw_stats;
962 if (netif_running(dev) && netif_device_present(dev)) {
963 if (spin_trylock_bh(&hw_stats->stats_lock)) {
964 mtk_stats_update_mac(mac);
965 spin_unlock_bh(&hw_stats->stats_lock);
970 start = u64_stats_fetch_begin(&hw_stats->syncp);
971 storage->rx_packets = hw_stats->rx_packets;
972 storage->tx_packets = hw_stats->tx_packets;
973 storage->rx_bytes = hw_stats->rx_bytes;
974 storage->tx_bytes = hw_stats->tx_bytes;
975 storage->collisions = hw_stats->tx_collisions;
976 storage->rx_length_errors = hw_stats->rx_short_errors +
977 hw_stats->rx_long_errors;
978 storage->rx_over_errors = hw_stats->rx_overflow;
979 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
980 storage->rx_errors = hw_stats->rx_checksum_errors;
981 storage->tx_aborted_errors = hw_stats->tx_skip;
982 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
984 storage->tx_errors = dev->stats.tx_errors;
985 storage->rx_dropped = dev->stats.rx_dropped;
986 storage->tx_dropped = dev->stats.tx_dropped;
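/* Note on the loop above: hw_stats->syncp is a u64_stats seqcount.  The
 * writer side (mtk_stats_update_mac(), running under stats_lock) brackets
 * its updates with u64_stats_update_begin()/end(), so the reader here
 * simply retries the snapshot whenever u64_stats_fetch_retry() reports
 * that a writer ran concurrently; no reader-side lock is needed.
 */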
989 static inline int mtk_max_frag_size(int mtu)
991 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
992 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
993 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
995 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
996 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
999 static inline int mtk_max_buf_size(int frag_size)
1001 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1002 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1004 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
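/* Relationship between the two helpers above: mtk_max_frag_size() sizes a
 * receive fragment so that the DMA payload area plus the headroom and a
 * struct skb_shared_info tail all fit, and mtk_max_buf_size() inverts that
 * calculation, subtracting NET_SKB_PAD + NET_IP_ALIGN and the shared-info
 * footprint again to recover the usable DMA buffer length; the WARN_ON()
 * fires if that length ever drops below the 2 KiB receive limit.
 */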
1009 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1010 struct mtk_rx_dma_v2 *dma_rxd)
1012 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1013 if (!(rxd->rxd2 & RX_DMA_DONE))
1016 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1017 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1018 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1019 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1020 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1021 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1027 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1029 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1032 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1035 return (void *)data;
1038 /* the qdma core needs scratch memory to be set up */
1039 static int mtk_init_fq_dma(struct mtk_eth *eth)
1041 const struct mtk_soc_data *soc = eth->soc;
1042 dma_addr_t phy_ring_tail;
1043 int cnt = MTK_QDMA_RING_SIZE;
1044 dma_addr_t dma_addr;
1047 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1048 cnt * soc->txrx.txd_size,
1049 &eth->phy_scratch_ring,
1051 if (unlikely(!eth->scratch_ring))
1054 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1055 if (unlikely(!eth->scratch_head))
1058 dma_addr = dma_map_single(eth->dma_dev,
1059 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1061 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1064 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1066 for (i = 0; i < cnt; i++) {
1067 struct mtk_tx_dma_v2 *txd;
1069 txd = eth->scratch_ring + i * soc->txrx.txd_size;
1070 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1072 txd->txd2 = eth->phy_scratch_ring +
1073 (i + 1) * soc->txrx.txd_size;
1075 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1077 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1085 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1086 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1087 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1088 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
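/* Layout produced above: cnt QDMA "free queue" descriptors sit in one
 * coherent allocation, each txd1 pointing at a MTK_QDMA_PAGE_SIZE slice of
 * the streaming-mapped scratch_head buffer and each txd2 chaining to the
 * next descriptor's DMA address.  The head/tail/count/buffer-length
 * registers programmed last tell the QDMA engine where this scratch ring
 * lives.
 */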
1093 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1095 return ring->dma + (desc - ring->phys);
1098 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1099 void *txd, u32 txd_size)
1101 int idx = (txd - ring->dma) / txd_size;
1103 return &ring->buf[idx];
1106 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1107 struct mtk_tx_dma *dma)
1109 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1112 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1114 return (dma - ring->dma) / txd_size;
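/* The helpers above rely on the same invariant: descriptor i of the TX
 * ring lives at ring->dma + i * txd_size (and at ring->phys + i * txd_size
 * in DMA space), so indices, CPU pointers and DMA addresses convert with
 * plain pointer arithmetic.  qdma_to_pdma() additionally maps a QDMA
 * descriptor to the shadow descriptor at the same index in ring->dma_pdma,
 * which is what the PDMA-only SoCs actually fetch.
 */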
1117 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1118 struct xdp_frame_bulk *bq, bool napi)
1120 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1121 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1122 dma_unmap_single(eth->dma_dev,
1123 dma_unmap_addr(tx_buf, dma_addr0),
1124 dma_unmap_len(tx_buf, dma_len0),
1126 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1127 dma_unmap_page(eth->dma_dev,
1128 dma_unmap_addr(tx_buf, dma_addr0),
1129 dma_unmap_len(tx_buf, dma_len0),
1133 if (dma_unmap_len(tx_buf, dma_len0)) {
1134 dma_unmap_page(eth->dma_dev,
1135 dma_unmap_addr(tx_buf, dma_addr0),
1136 dma_unmap_len(tx_buf, dma_len0),
1140 if (dma_unmap_len(tx_buf, dma_len1)) {
1141 dma_unmap_page(eth->dma_dev,
1142 dma_unmap_addr(tx_buf, dma_addr1),
1143 dma_unmap_len(tx_buf, dma_len1),
1148 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1149 if (tx_buf->type == MTK_TYPE_SKB) {
1150 struct sk_buff *skb = tx_buf->data;
1153 napi_consume_skb(skb, napi);
1155 dev_kfree_skb_any(skb);
1157 struct xdp_frame *xdpf = tx_buf->data;
1159 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1160 xdp_return_frame_rx_napi(xdpf);
1162 xdp_return_frame_bulk(xdpf, bq);
1164 xdp_return_frame(xdpf);
1168 tx_buf->data = NULL;
1171 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1172 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1173 size_t size, int idx)
1175 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1176 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1177 dma_unmap_len_set(tx_buf, dma_len0, size);
1180 txd->txd3 = mapped_addr;
1181 txd->txd2 |= TX_DMA_PLEN1(size);
1182 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1183 dma_unmap_len_set(tx_buf, dma_len1, size);
1185 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1186 txd->txd1 = mapped_addr;
1187 txd->txd2 = TX_DMA_PLEN0(size);
1188 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1189 dma_unmap_len_set(tx_buf, dma_len0, size);
1194 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1195 struct mtk_tx_dma_desc_info *info)
1197 struct mtk_mac *mac = netdev_priv(dev);
1198 struct mtk_eth *eth = mac->hw;
1199 struct mtk_tx_dma *desc = txd;
1202 WRITE_ONCE(desc->txd1, info->addr);
1204 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1205 FIELD_PREP(TX_DMA_PQID, info->qid);
1208 WRITE_ONCE(desc->txd3, data);
1210 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1214 /* tx checksum offload */
1216 data |= TX_DMA_CHKSUM;
1217 /* vlan header offload */
1219 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1221 WRITE_ONCE(desc->txd4, data);
1224 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1225 struct mtk_tx_dma_desc_info *info)
1227 struct mtk_mac *mac = netdev_priv(dev);
1228 struct mtk_tx_dma_v2 *desc = txd;
1229 struct mtk_eth *eth = mac->hw;
1232 WRITE_ONCE(desc->txd1, info->addr);
1234 data = TX_DMA_PLEN0(info->size);
1237 WRITE_ONCE(desc->txd3, data);
1239 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1240 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1241 WRITE_ONCE(desc->txd4, data);
1246 data |= TX_DMA_TSO_V2;
1247 /* tx checksum offload */
1249 data |= TX_DMA_CHKSUM_V2;
1251 WRITE_ONCE(desc->txd5, data);
1254 if (info->first && info->vlan)
1255 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1256 WRITE_ONCE(desc->txd6, data);
1258 WRITE_ONCE(desc->txd7, 0);
1259 WRITE_ONCE(desc->txd8, 0);
1262 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1263 struct mtk_tx_dma_desc_info *info)
1265 struct mtk_mac *mac = netdev_priv(dev);
1266 struct mtk_eth *eth = mac->hw;
1268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1269 mtk_tx_set_dma_desc_v2(dev, txd, info);
1271 mtk_tx_set_dma_desc_v1(dev, txd, info);
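/* Summary of the two descriptor formats dispatched above: the v1 layout
 * carries the buffer length and queue id in txd3 and the forward port,
 * checksum and VLAN-insert flags in txd4, while the NETSYS_V2 layout moves
 * port/queue selection to txd4, TSO/checksum offload to txd5 and VLAN
 * insertion to txd6, clearing the unused txd7/txd8 words.
 */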
1274 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1275 int tx_num, struct mtk_tx_ring *ring, bool gso)
1277 struct mtk_tx_dma_desc_info txd_info = {
1278 .size = skb_headlen(skb),
1280 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1281 .vlan = skb_vlan_tag_present(skb),
1282 .qid = skb_get_queue_mapping(skb),
1283 .vlan_tci = skb_vlan_tag_get(skb),
1285 .last = !skb_is_nonlinear(skb),
1287 struct netdev_queue *txq;
1288 struct mtk_mac *mac = netdev_priv(dev);
1289 struct mtk_eth *eth = mac->hw;
1290 const struct mtk_soc_data *soc = eth->soc;
1291 struct mtk_tx_dma *itxd, *txd;
1292 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1293 struct mtk_tx_buf *itx_buf, *tx_buf;
1295 int queue = skb_get_queue_mapping(skb);
1298 txq = netdev_get_tx_queue(dev, queue);
1299 itxd = ring->next_free;
1300 itxd_pdma = qdma_to_pdma(ring, itxd);
1301 if (itxd == ring->last_free)
1304 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1305 memset(itx_buf, 0, sizeof(*itx_buf));
1307 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1309 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1312 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1314 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1315 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1316 MTK_TX_FLAGS_FPORT1;
1317 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1322 txd_pdma = qdma_to_pdma(ring, txd);
1324 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1325 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1326 unsigned int offset = 0;
1327 int frag_size = skb_frag_size(frag);
1330 bool new_desc = true;
1332 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1334 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1335 txd_pdma = qdma_to_pdma(ring, txd);
1336 if (txd == ring->last_free)
1344 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1345 txd_info.size = min_t(unsigned int, frag_size,
1346 soc->txrx.dma_max_len);
1347 txd_info.qid = queue;
1348 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1349 !(frag_size - txd_info.size);
1350 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1351 offset, txd_info.size,
1353 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1356 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1358 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1359 soc->txrx.txd_size);
1361 memset(tx_buf, 0, sizeof(*tx_buf));
1362 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1363 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1364 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1365 MTK_TX_FLAGS_FPORT1;
1367 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1368 txd_info.size, k++);
1370 frag_size -= txd_info.size;
1371 offset += txd_info.size;
1375 /* store skb for cleanup */
1376 itx_buf->type = MTK_TYPE_SKB;
1377 itx_buf->data = skb;
1379 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1381 txd_pdma->txd2 |= TX_DMA_LS0;
1383 txd_pdma->txd2 |= TX_DMA_LS1;
1386 netdev_tx_sent_queue(txq, skb->len);
1387 skb_tx_timestamp(skb);
1389 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1390 atomic_sub(n_desc, &ring->free_count);
1392 /* make sure that all changes to the dma ring are flushed before we
1397 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1398 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1399 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1403 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1405 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1412 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1415 mtk_tx_unmap(eth, tx_buf, NULL, false);
1417 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1418 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1419 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1421 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1422 itxd_pdma = qdma_to_pdma(ring, itxd);
1423 } while (itxd != txd);
1428 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1433 if (skb_is_gso(skb)) {
1434 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1435 frag = &skb_shinfo(skb)->frags[i];
1436 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1437 eth->soc->txrx.dma_max_len);
1440 nfrags += skb_shinfo(skb)->nr_frags;
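/* Worked example for the GSO branch above (illustrative sizes): with a
 * dma_max_len of 16 KiB, a single 40 KiB skb fragment contributes
 * DIV_ROUND_UP(40960, 16384) == 3 TX descriptors, whereas a non-GSO skb
 * simply needs one descriptor per fragment on top of the linear head.
 */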
1446 static int mtk_queue_stopped(struct mtk_eth *eth)
1450 for (i = 0; i < MTK_MAC_COUNT; i++) {
1451 if (!eth->netdev[i])
1453 if (netif_queue_stopped(eth->netdev[i]))
1460 static void mtk_wake_queue(struct mtk_eth *eth)
1464 for (i = 0; i < MTK_MAC_COUNT; i++) {
1465 if (!eth->netdev[i])
1467 netif_tx_wake_all_queues(eth->netdev[i]);
1471 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1473 struct mtk_mac *mac = netdev_priv(dev);
1474 struct mtk_eth *eth = mac->hw;
1475 struct mtk_tx_ring *ring = &eth->tx_ring;
1476 struct net_device_stats *stats = &dev->stats;
1480 /* normally we can rely on the stack not calling this more than once,
1481 * however we have 2 queues running on the same ring so we need to lock
1484 spin_lock(&eth->page_lock);
1486 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1489 tx_num = mtk_cal_txd_req(eth, skb);
1490 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1491 netif_tx_stop_all_queues(dev);
1492 netif_err(eth, tx_queued, dev,
1493 "Tx Ring full when queue awake!\n");
1494 spin_unlock(&eth->page_lock);
1495 return NETDEV_TX_BUSY;
1498 /* TSO: fill MSS info in tcp checksum field */
1499 if (skb_is_gso(skb)) {
1500 if (skb_cow_head(skb, 0)) {
1501 netif_warn(eth, tx_err, dev,
1502 "GSO expand head fail.\n");
1506 if (skb_shinfo(skb)->gso_type &
1507 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1509 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1513 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1516 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1517 netif_tx_stop_all_queues(dev);
1519 spin_unlock(&eth->page_lock);
1521 return NETDEV_TX_OK;
1524 spin_unlock(&eth->page_lock);
1525 stats->tx_dropped++;
1526 dev_kfree_skb_any(skb);
1527 return NETDEV_TX_OK;
1530 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1533 struct mtk_rx_ring *ring;
1537 return &eth->rx_ring[0];
1539 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1540 struct mtk_rx_dma *rxd;
1542 ring = &eth->rx_ring[i];
1543 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1544 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1545 if (rxd->rxd2 & RX_DMA_DONE) {
1546 ring->calc_idx_update = true;
1554 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1556 struct mtk_rx_ring *ring;
1560 ring = &eth->rx_ring[0];
1561 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1563 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1564 ring = &eth->rx_ring[i];
1565 if (ring->calc_idx_update) {
1566 ring->calc_idx_update = false;
1567 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1573 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1575 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1578 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1579 struct xdp_rxq_info *xdp_q,
1582 struct page_pool_params pp_params = {
1584 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1586 .nid = NUMA_NO_NODE,
1587 .dev = eth->dma_dev,
1588 .offset = MTK_PP_HEADROOM,
1589 .max_len = MTK_PP_MAX_BUF_SIZE,
1591 struct page_pool *pp;
1594 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1596 pp = page_pool_create(&pp_params);
1600 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1601 eth->rx_napi.napi_id, PAGE_SIZE);
1605 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1607 goto err_unregister_rxq;
1612 xdp_rxq_info_unreg(xdp_q);
1614 page_pool_destroy(pp);
1616 return ERR_PTR(err);
1619 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1624 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1628 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1629 return page_address(page);
1632 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1634 if (ring->page_pool)
1635 page_pool_put_full_page(ring->page_pool,
1636 virt_to_head_page(data), napi);
1638 skb_free_frag(data);
1641 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1642 struct mtk_tx_dma_desc_info *txd_info,
1643 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1644 void *data, u16 headroom, int index, bool dma_map)
1646 struct mtk_tx_ring *ring = &eth->tx_ring;
1647 struct mtk_mac *mac = netdev_priv(dev);
1648 struct mtk_tx_dma *txd_pdma;
1650 if (dma_map) { /* ndo_xdp_xmit */
1651 txd_info->addr = dma_map_single(eth->dma_dev, data,
1652 txd_info->size, DMA_TO_DEVICE);
1653 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1656 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1658 struct page *page = virt_to_head_page(data);
1660 txd_info->addr = page_pool_get_dma_addr(page) +
1661 sizeof(struct xdp_frame) + headroom;
1662 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1663 txd_info->size, DMA_BIDIRECTIONAL);
1665 mtk_tx_set_dma_desc(dev, txd, txd_info);
1667 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1668 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1669 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1671 txd_pdma = qdma_to_pdma(ring, txd);
1672 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1678 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1679 struct net_device *dev, bool dma_map)
1681 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1682 const struct mtk_soc_data *soc = eth->soc;
1683 struct mtk_tx_ring *ring = &eth->tx_ring;
1684 struct mtk_mac *mac = netdev_priv(dev);
1685 struct mtk_tx_dma_desc_info txd_info = {
1688 .last = !xdp_frame_has_frags(xdpf),
1691 int err, index = 0, n_desc = 1, nr_frags;
1692 struct mtk_tx_buf *htx_buf, *tx_buf;
1693 struct mtk_tx_dma *htxd, *txd;
1694 void *data = xdpf->data;
1696 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1699 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1700 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1703 spin_lock(&eth->page_lock);
1705 txd = ring->next_free;
1706 if (txd == ring->last_free) {
1707 spin_unlock(&eth->page_lock);
1712 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1713 memset(tx_buf, 0, sizeof(*tx_buf));
1717 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1718 data, xdpf->headroom, index, dma_map);
1725 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1726 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1727 if (txd == ring->last_free)
1730 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1731 soc->txrx.txd_size);
1732 memset(tx_buf, 0, sizeof(*tx_buf));
1736 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1737 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1738 txd_info.last = index + 1 == nr_frags;
1739 txd_info.qid = mac->id;
1740 data = skb_frag_address(&sinfo->frags[index]);
1744 /* store xdpf for cleanup */
1745 htx_buf->data = xdpf;
1747 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1748 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1751 txd_pdma->txd2 |= TX_DMA_LS0;
1753 txd_pdma->txd2 |= TX_DMA_LS1;
1756 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1757 atomic_sub(n_desc, &ring->free_count);
1759 /* make sure that all changes to the dma ring are flushed before we
1764 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1765 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1769 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1770 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1771 MT7628_TX_CTX_IDX0);
1774 spin_unlock(&eth->page_lock);
1779 while (htxd != txd) {
1780 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1781 mtk_tx_unmap(eth, tx_buf, NULL, false);
1783 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1784 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1785 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1787 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1790 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1793 spin_unlock(&eth->page_lock);
1798 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1799 struct xdp_frame **frames, u32 flags)
1801 struct mtk_mac *mac = netdev_priv(dev);
1802 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1803 struct mtk_eth *eth = mac->hw;
1806 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1809 for (i = 0; i < num_frame; i++) {
1810 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1815 u64_stats_update_begin(&hw_stats->syncp);
1816 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1817 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1818 u64_stats_update_end(&hw_stats->syncp);
1823 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1824 struct xdp_buff *xdp, struct net_device *dev)
1826 struct mtk_mac *mac = netdev_priv(dev);
1827 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1828 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1829 struct bpf_prog *prog;
1834 prog = rcu_dereference(eth->prog);
1838 act = bpf_prog_run_xdp(prog, xdp);
1841 count = &hw_stats->xdp_stats.rx_xdp_pass;
1844 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1849 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1852 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1854 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1855 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1860 count = &hw_stats->xdp_stats.rx_xdp_tx;
1864 bpf_warn_invalid_xdp_action(dev, prog, act);
1867 trace_xdp_exception(dev, prog, act);
1873 page_pool_put_full_page(ring->page_pool,
1874 virt_to_head_page(xdp->data), true);
1877 u64_stats_update_begin(&hw_stats->syncp);
1878 *count = *count + 1;
1879 u64_stats_update_end(&hw_stats->syncp);
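/* Verdict handling above in short: XDP_PASS hands the buffer back to the
 * normal receive path, XDP_REDIRECT leaves the actual flush to the caller,
 * XDP_TX re-submits the buffer through mtk_xdp_submit_frame(), and any
 * other or failed verdict returns the page to the page pool; in every case
 * the matching xdp_stats counter is bumped under the u64_stats sync point.
 */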
1886 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1887 struct mtk_eth *eth)
1889 struct dim_sample dim_sample = {};
1890 struct mtk_rx_ring *ring;
1891 bool xdp_flush = false;
1893 struct sk_buff *skb;
1894 u8 *data, *new_data;
1895 struct mtk_rx_dma_v2 *rxd, trxd;
1896 int done = 0, bytes = 0;
1898 while (done < budget) {
1899 unsigned int pktlen, *rxdcsum;
1900 bool has_hwaccel_tag = false;
1901 struct net_device *netdev;
1902 u16 vlan_proto, vlan_tci;
1903 dma_addr_t dma_addr;
1907 ring = mtk_get_rx_ring(eth);
1908 if (unlikely(!ring))
1911 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1912 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1913 data = ring->data[idx];
1915 if (!mtk_rx_get_desc(eth, &trxd, rxd))
1918 /* find out which mac the packet comes from. values start at 1 */
1919 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1920 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1921 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1922 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1923 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1925 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1929 netdev = eth->netdev[mac];
1931 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1934 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1936 /* alloc new buffer */
1937 if (ring->page_pool) {
1938 struct page *page = virt_to_head_page(data);
1939 struct xdp_buff xdp;
1942 new_data = mtk_page_pool_get_buff(ring->page_pool,
1945 if (unlikely(!new_data)) {
1946 netdev->stats.rx_dropped++;
1950 dma_sync_single_for_cpu(eth->dma_dev,
1951 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1952 pktlen, page_pool_get_dma_dir(ring->page_pool));
1954 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1955 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1957 xdp_buff_clear_frags_flag(&xdp);
1959 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1960 if (ret == XDP_REDIRECT)
1963 if (ret != XDP_PASS)
1966 skb = build_skb(data, PAGE_SIZE);
1967 if (unlikely(!skb)) {
1968 page_pool_put_full_page(ring->page_pool,
1970 netdev->stats.rx_dropped++;
1974 skb_reserve(skb, xdp.data - xdp.data_hard_start);
1975 skb_put(skb, xdp.data_end - xdp.data);
1976 skb_mark_for_recycle(skb);
1978 if (ring->frag_size <= PAGE_SIZE)
1979 new_data = napi_alloc_frag(ring->frag_size);
1981 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1983 if (unlikely(!new_data)) {
1984 netdev->stats.rx_dropped++;
1988 dma_addr = dma_map_single(eth->dma_dev,
1989 new_data + NET_SKB_PAD + eth->ip_align,
1990 ring->buf_size, DMA_FROM_DEVICE);
1991 if (unlikely(dma_mapping_error(eth->dma_dev,
1993 skb_free_frag(new_data);
1994 netdev->stats.rx_dropped++;
1998 dma_unmap_single(eth->dma_dev, trxd.rxd1,
1999 ring->buf_size, DMA_FROM_DEVICE);
2001 skb = build_skb(data, ring->frag_size);
2002 if (unlikely(!skb)) {
2003 netdev->stats.rx_dropped++;
2004 skb_free_frag(data);
2008 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2009 skb_put(skb, pktlen);
2015 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2016 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2017 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2018 if (hash != MTK_RXD5_FOE_ENTRY)
2019 skb_set_hash(skb, jhash_1word(hash, 0),
2021 rxdcsum = &trxd.rxd3;
2023 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2024 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2025 if (hash != MTK_RXD4_FOE_ENTRY)
2026 skb_set_hash(skb, jhash_1word(hash, 0),
2028 rxdcsum = &trxd.rxd4;
2031 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2032 skb->ip_summed = CHECKSUM_UNNECESSARY;
2034 skb_checksum_none_assert(skb);
2035 skb->protocol = eth_type_trans(skb, netdev);
2037 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2038 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2040 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2041 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2042 if (trxd.rxd3 & RX_DMA_VTAG_V2) {
2043 vlan_proto = RX_DMA_VPID(trxd.rxd4);
2044 vlan_tci = RX_DMA_VID(trxd.rxd4);
2045 has_hwaccel_tag = true;
2047 } else if (trxd.rxd2 & RX_DMA_VTAG) {
2048 vlan_proto = RX_DMA_VPID(trxd.rxd3);
2049 vlan_tci = RX_DMA_VID(trxd.rxd3);
2050 has_hwaccel_tag = true;
2054 /* When using VLAN untagging in combination with DSA, the
2055 * hardware treats the MTK special tag as a VLAN and untags it.
2057 if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
2058 unsigned int port = vlan_proto & GENMASK(2, 0);
2060 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2061 eth->dsa_meta[port])
2062 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2063 } else if (has_hwaccel_tag) {
2064 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
2067 skb_record_rx_queue(skb, 0);
2068 napi_gro_receive(napi, skb);
2071 ring->data[idx] = new_data;
2072 rxd->rxd1 = (unsigned int)dma_addr;
2074 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2075 rxd->rxd2 = RX_DMA_LSO;
2077 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2079 ring->calc_idx = idx;
2085 /* make sure that all changes to the dma ring are flushed before
2089 mtk_update_rx_cpu_idx(eth);
2092 eth->rx_packets += done;
2093 eth->rx_bytes += bytes;
2094 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2096 net_dim(&eth->rx_dim, dim_sample);
2104 struct mtk_poll_state {
2105 struct netdev_queue *txq;
2112 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2113 struct sk_buff *skb)
2115 struct netdev_queue *txq;
2116 struct net_device *dev;
2117 unsigned int bytes = skb->len;
2121 eth->tx_bytes += bytes;
2123 dev = eth->netdev[mac];
2127 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2128 if (state->txq == txq) {
2130 state->bytes += bytes;
2135 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2139 state->bytes = bytes;
2142 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2143 struct mtk_poll_state *state)
2145 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2146 struct mtk_tx_ring *ring = &eth->tx_ring;
2147 struct mtk_tx_buf *tx_buf;
2148 struct xdp_frame_bulk bq;
2149 struct mtk_tx_dma *desc;
2152 cpu = ring->last_free_ptr;
2153 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2155 desc = mtk_qdma_phys_to_virt(ring, cpu);
2156 xdp_frame_bulk_init(&bq);
2158 while ((cpu != dma) && budget) {
2159 u32 next_cpu = desc->txd2;
2162 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2163 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2166 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2167 eth->soc->txrx.txd_size);
2168 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2174 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2175 if (tx_buf->type == MTK_TYPE_SKB)
2176 mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2180 mtk_tx_unmap(eth, tx_buf, &bq, true);
2182 ring->last_free = desc;
2183 atomic_inc(&ring->free_count);
2187 xdp_flush_frame_bulk(&bq);
2189 ring->last_free_ptr = cpu;
2190 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2195 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2196 struct mtk_poll_state *state)
2198 struct mtk_tx_ring *ring = &eth->tx_ring;
2199 struct mtk_tx_buf *tx_buf;
2200 struct xdp_frame_bulk bq;
2201 struct mtk_tx_dma *desc;
2204 cpu = ring->cpu_idx;
2205 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2206 xdp_frame_bulk_init(&bq);
2208 while ((cpu != dma) && budget) {
2209 tx_buf = &ring->buf[cpu];
2213 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2214 if (tx_buf->type == MTK_TYPE_SKB)
2215 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2218 mtk_tx_unmap(eth, tx_buf, &bq, true);
2220 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2221 ring->last_free = desc;
2222 atomic_inc(&ring->free_count);
2224 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2226 xdp_flush_frame_bulk(&bq);
2228 ring->cpu_idx = cpu;
2233 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2235 struct mtk_tx_ring *ring = &eth->tx_ring;
2236 struct dim_sample dim_sample = {};
2237 struct mtk_poll_state state = {};
2239 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2240 budget = mtk_poll_tx_qdma(eth, budget, &state);
2242 budget = mtk_poll_tx_pdma(eth, budget, &state);
2245 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2247 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2249 net_dim(&eth->tx_dim, dim_sample);
2251 if (mtk_queue_stopped(eth) &&
2252 (atomic_read(&ring->free_count) > ring->thresh))
2253 mtk_wake_queue(eth);
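/* The dim_update_sample()/net_dim() pair above feeds the completed
 * packet/byte counts into the dynamic interrupt moderation library; any
 * moderation change it decides on is applied later from the DIM work item
 * serviced elsewhere in this file, not synchronously here.
 */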
2258 static void mtk_handle_status_irq(struct mtk_eth *eth)
2260 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2262 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2263 mtk_stats_update(eth);
2264 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2269 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2271 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2272 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2275 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2276 mtk_handle_status_irq(eth);
2277 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2278 tx_done = mtk_poll_tx(eth, budget);
2280 if (unlikely(netif_msg_intr(eth))) {
2282 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2283 mtk_r32(eth, reg_map->tx_irq_status),
2284 mtk_r32(eth, reg_map->tx_irq_mask));
2287 if (tx_done == budget)
2290 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2293 if (napi_complete_done(napi, tx_done))
2294 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2299 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2301 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2302 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2303 int rx_done_total = 0;
2305 mtk_handle_status_irq(eth);
2310 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2311 reg_map->pdma.irq_status);
2312 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2313 rx_done_total += rx_done;
2315 if (unlikely(netif_msg_intr(eth))) {
2317 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2318 mtk_r32(eth, reg_map->pdma.irq_status),
2319 mtk_r32(eth, reg_map->pdma.irq_mask));
2322 if (rx_done_total == budget)
2325 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2326 eth->soc->txrx.rx_irq_done_mask);
2328 if (napi_complete_done(napi, rx_done_total))
2329 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2331 return rx_done_total;
2334 static int mtk_tx_alloc(struct mtk_eth *eth)
2336 const struct mtk_soc_data *soc = eth->soc;
2337 struct mtk_tx_ring *ring = &eth->tx_ring;
2338 int i, sz = soc->txrx.txd_size;
2339 struct mtk_tx_dma_v2 *txd;
2343 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2344 ring_size = MTK_QDMA_RING_SIZE;
2346 ring_size = MTK_DMA_SIZE;
2348 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2353 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2354 &ring->phys, GFP_KERNEL);
2358 for (i = 0; i < ring_size; i++) {
2359 int next = (i + 1) % ring_size;
2360 u32 next_ptr = ring->phys + next * sz;
2362 txd = ring->dma + i * sz;
2363 txd->txd2 = next_ptr;
2364 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2366 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2374 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2375 * only as the framework. The real HW descriptors are the PDMA
2376 * descriptors in ring->dma_pdma.
2378 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2379 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2380 &ring->phys_pdma, GFP_KERNEL);
2381 if (!ring->dma_pdma)
2384 for (i = 0; i < ring_size; i++) {
2385 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2386 ring->dma_pdma[i].txd4 = 0;
2390 ring->dma_size = ring_size;
2391 atomic_set(&ring->free_count, ring_size - 2);
2392 ring->next_free = ring->dma;
2393 ring->last_free = (void *)txd;
2394 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2395 ring->thresh = MAX_SKB_FRAGS;
2397 /* make sure that all changes to the dma ring are flushed before we
2402 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2403 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2404 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2406 ring->phys + ((ring_size - 1) * sz),
2407 soc->reg_map->qdma.crx_ptr);
2408 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2410 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2411 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2412 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2414 val = MTK_QTX_SCH_MIN_RATE_EN |
2415 /* minimum: 10 Mbps */
2416 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2417 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2418 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2419 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2420 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2421 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2422 ofs += MTK_QTX_OFFSET;
2424 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2425 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2426 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2427 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2429 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2430 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2431 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2432 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2441 static void mtk_tx_clean(struct mtk_eth *eth)
2443 const struct mtk_soc_data *soc = eth->soc;
2444 struct mtk_tx_ring *ring = &eth->tx_ring;
2448 for (i = 0; i < ring->dma_size; i++)
2449 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2455 dma_free_coherent(eth->dma_dev,
2456 ring->dma_size * soc->txrx.txd_size,
2457 ring->dma, ring->phys);
2461 if (ring->dma_pdma) {
2462 dma_free_coherent(eth->dma_dev,
2463 ring->dma_size * soc->txrx.txd_size,
2464 ring->dma_pdma, ring->phys_pdma);
2465 ring->dma_pdma = NULL;
2469 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2471 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2472 struct mtk_rx_ring *ring;
2473 int rx_data_len, rx_dma_size;
2476 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2479 ring = &eth->rx_ring_qdma;
2481 ring = &eth->rx_ring[ring_no];
2484 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2485 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2486 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2488 rx_data_len = ETH_DATA_LEN;
2489 rx_dma_size = MTK_DMA_SIZE;
2492 ring->frag_size = mtk_max_frag_size(rx_data_len);
2493 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2494 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2499 if (mtk_page_pool_enabled(eth)) {
2500 struct page_pool *pp;
2502 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2507 ring->page_pool = pp;
2510 ring->dma = dma_alloc_coherent(eth->dma_dev,
2511 rx_dma_size * eth->soc->txrx.rxd_size,
2512 &ring->phys, GFP_KERNEL);
2516 for (i = 0; i < rx_dma_size; i++) {
2517 struct mtk_rx_dma_v2 *rxd;
2518 dma_addr_t dma_addr;
2521 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2522 if (ring->page_pool) {
2523 data = mtk_page_pool_get_buff(ring->page_pool,
2524 &dma_addr, GFP_KERNEL);
2528 if (ring->frag_size <= PAGE_SIZE)
2529 data = netdev_alloc_frag(ring->frag_size);
2531 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2536 dma_addr = dma_map_single(eth->dma_dev,
2537 data + NET_SKB_PAD + eth->ip_align,
2538 ring->buf_size, DMA_FROM_DEVICE);
2539 if (unlikely(dma_mapping_error(eth->dma_dev,
2541 skb_free_frag(data);
2545 rxd->rxd1 = (unsigned int)dma_addr;
2546 ring->data[i] = data;
2548 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2549 rxd->rxd2 = RX_DMA_LSO;
2551 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2555 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2563 ring->dma_size = rx_dma_size;
2564 ring->calc_idx_update = false;
2565 ring->calc_idx = rx_dma_size - 1;
2566 if (rx_flag == MTK_RX_FLAGS_QDMA)
2567 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2568 ring_no * MTK_QRX_OFFSET;
2570 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2571 ring_no * MTK_QRX_OFFSET;
2572 /* make sure that all changes to the dma ring are flushed before we continue */
2577 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2578 mtk_w32(eth, ring->phys,
2579 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2580 mtk_w32(eth, rx_dma_size,
2581 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2582 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2583 reg_map->qdma.rst_idx);
2585 mtk_w32(eth, ring->phys,
2586 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2587 mtk_w32(eth, rx_dma_size,
2588 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2589 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2590 reg_map->pdma.rst_idx);
2592 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
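/* Tear down an RX ring: unmap and free its buffers, release the descriptor memory and destroy the page pool, if any */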
2597 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2601 if (ring->data && ring->dma) {
2602 for (i = 0; i < ring->dma_size; i++) {
2603 struct mtk_rx_dma *rxd;
2608 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2612 dma_unmap_single(eth->dma_dev, rxd->rxd1,
2613 ring->buf_size, DMA_FROM_DEVICE);
2614 mtk_rx_put_buff(ring, ring->data[i], false);
2621 dma_free_coherent(eth->dma_dev,
2622 ring->dma_size * eth->soc->txrx.rxd_size,
2623 ring->dma, ring->phys);
2627 if (ring->page_pool) {
2628 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2629 xdp_rxq_info_unreg(&ring->xdp_q);
2630 page_pool_destroy(ring->page_pool);
2631 ring->page_pool = NULL;
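/* Program the HW LRO rings (auto-learn mode, age/aggregation limits) and the global LRO control registers, then enable LRO */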
2635 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2638 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2639 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2641 /* set LRO rings to auto-learn modes */
2642 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2644 /* validate LRO ring */
2645 ring_ctrl_dw2 |= MTK_RING_VLD;
2647 /* set AGE timer (unit: 20us) */
2648 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2649 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2651 /* set max AGG timer (unit: 20us) */
2652 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2654 /* set max LRO AGG count */
2655 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2656 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2658 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2659 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2660 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2661 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2664 /* IPv4 checksum update enable */
2665 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2667 /* switch priority comparison to packet count mode */
2668 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2670 /* bandwidth threshold setting */
2671 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2673 /* auto-learn score delta setting */
2674 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2676 /* set the refresh timer for ALT (auto-learn) flow replacement to 1 sec (unit: 20us) */
2677 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2678 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2680 /* set HW LRO mode & the max aggregation count for rx packets */
2681 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2683 /* the minimum SDL0 room that must remain in the RXD for LRO aggregation */
2684 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2687 lro_ctrl_dw0 |= MTK_LRO_EN;
2689 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2690 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
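/* Make the hardware relinquish the LRO rings (flushing aggregated packets), then invalidate the rings and disable HW LRO */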
2695 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2700 /* relinquish lro rings, flush aggregated packets */
2701 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2703 /* wait for relinquishments done */
2704 for (i = 0; i < 10; i++) {
2705 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2706 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2713 /* invalidate lro rings */
2714 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2715 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2717 /* disable HW LRO */
2718 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2721 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2725 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2727 /* invalidate the IP setting */
2728 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2730 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2732 /* validate the IP setting */
2733 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2736 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2740 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2742 /* invalidate the IP setting */
2743 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2745 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2748 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2753 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2754 if (mac->hwlro_ip[i])
2761 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2762 struct ethtool_rxnfc *cmd)
2764 struct ethtool_rx_flow_spec *fsp =
2765 (struct ethtool_rx_flow_spec *)&cmd->fs;
2766 struct mtk_mac *mac = netdev_priv(dev);
2767 struct mtk_eth *eth = mac->hw;
2770 if ((fsp->flow_type != TCP_V4_FLOW) ||
2771 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2772 (fsp->location > 1))
2775 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2776 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2778 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2780 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2785 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2786 struct ethtool_rxnfc *cmd)
2788 struct ethtool_rx_flow_spec *fsp =
2789 (struct ethtool_rx_flow_spec *)&cmd->fs;
2790 struct mtk_mac *mac = netdev_priv(dev);
2791 struct mtk_eth *eth = mac->hw;
2794 if (fsp->location > 1)
2797 mac->hwlro_ip[fsp->location] = 0;
2798 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2800 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2802 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2807 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2809 struct mtk_mac *mac = netdev_priv(dev);
2810 struct mtk_eth *eth = mac->hw;
2813 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2814 mac->hwlro_ip[i] = 0;
2815 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2817 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2820 mac->hwlro_ip_cnt = 0;
2823 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2824 struct ethtool_rxnfc *cmd)
2826 struct mtk_mac *mac = netdev_priv(dev);
2827 struct ethtool_rx_flow_spec *fsp =
2828 (struct ethtool_rx_flow_spec *)&cmd->fs;
2830 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2833 /* only the TCP destination IPv4 address is meaningful; the other fields are ignored */
2834 fsp->flow_type = TCP_V4_FLOW;
2835 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2836 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2838 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2839 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2840 fsp->h_u.tcp_ip4_spec.psrc = 0;
2841 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2842 fsp->h_u.tcp_ip4_spec.pdst = 0;
2843 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2844 fsp->h_u.tcp_ip4_spec.tos = 0;
2845 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2850 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2851 struct ethtool_rxnfc *cmd,
2854 struct mtk_mac *mac = netdev_priv(dev);
2858 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2859 if (mac->hwlro_ip[i]) {
2865 cmd->rule_cnt = cnt;
2870 static netdev_features_t mtk_fix_features(struct net_device *dev,
2871 netdev_features_t features)
2873 if (!(features & NETIF_F_LRO)) {
2874 struct mtk_mac *mac = netdev_priv(dev);
2875 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2878 netdev_info(dev, "RX flows are still programmed, keeping LRO enabled\n");
2880 features |= NETIF_F_LRO;
2887 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2889 struct mtk_mac *mac = netdev_priv(dev);
2890 struct mtk_eth *eth = mac->hw;
2891 netdev_features_t diff = dev->features ^ features;
2894 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2895 mtk_hwlro_netdev_disable(dev);
2897 /* Set RX VLAN offloading */
2898 if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2901 mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2904 /* sync features with other MAC */
2905 for (i = 0; i < MTK_MAC_COUNT; i++) {
2906 if (!eth->netdev[i] || eth->netdev[i] == dev)
2908 eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2909 eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2915 /* wait for DMA to finish whatever it is doing before we start using it again */
2916 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2922 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2923 reg = eth->soc->reg_map->qdma.glo_cfg;
2925 reg = eth->soc->reg_map->pdma.glo_cfg;
2927 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2928 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2929 5, MTK_DMA_BUSY_TIMEOUT_US);
2931 dev_err(eth->dev, "DMA init timeout\n");
2936 static int mtk_dma_init(struct mtk_eth *eth)
2941 if (mtk_dma_busy_wait(eth))
2944 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2945 /* QDMA needs scratch memory for internal reordering of the fragmented packets */
2948 err = mtk_init_fq_dma(eth);
2953 err = mtk_tx_alloc(eth);
2957 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2958 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2963 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2968 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2969 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2973 err = mtk_hwlro_rx_init(eth);
2978 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2982 /* Enable random early drop and set drop threshold automatically */
2982 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2983 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2984 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2990 static void mtk_dma_free(struct mtk_eth *eth)
2992 const struct mtk_soc_data *soc = eth->soc;
2995 for (i = 0; i < MTK_MAC_COUNT; i++)
2997 netdev_reset_queue(eth->netdev[i]);
2998 if (eth->scratch_ring) {
2999 dma_free_coherent(eth->dma_dev,
3000 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3001 eth->scratch_ring, eth->phy_scratch_ring);
3002 eth->scratch_ring = NULL;
3003 eth->phy_scratch_ring = 0;
3006 mtk_rx_clean(eth, ð->rx_ring[0]);
3007 mtk_rx_clean(eth, ð->rx_ring_qdma);
3010 mtk_hwlro_rx_uninit(eth);
3011 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3012 mtk_rx_clean(eth, ð->rx_ring[i]);
3015 kfree(eth->scratch_head);
3018 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3020 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3022 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3023 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3024 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
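/* ndo_tx_timeout handler: unless a reset is already in progress, check the frame engine for a fault and schedule the recovery work */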
3027 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3029 struct mtk_mac *mac = netdev_priv(dev);
3030 struct mtk_eth *eth = mac->hw;
3032 if (test_bit(MTK_RESETTING, ð->state))
3035 if (!mtk_hw_reset_check(eth))
3038 eth->netdev[mac->id]->stats.tx_errors++;
3039 netif_err(eth, tx_err, dev, "transmit timed out\n");
3041 schedule_work(ð->pending_work);
3044 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3046 struct mtk_eth *eth = _eth;
3049 if (likely(napi_schedule_prep(ð->rx_napi))) {
3050 __napi_schedule(ð->rx_napi);
3051 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3057 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3059 struct mtk_eth *eth = _eth;
3062 if (likely(napi_schedule_prep(ð->tx_napi))) {
3063 __napi_schedule(ð->tx_napi);
3064 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3070 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3072 struct mtk_eth *eth = _eth;
3073 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3075 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3076 eth->soc->txrx.rx_irq_done_mask) {
3077 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3078 eth->soc->txrx.rx_irq_done_mask)
3079 mtk_handle_irq_rx(irq, _eth);
3081 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3082 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3083 mtk_handle_irq_tx(irq, _eth);
3089 #ifdef CONFIG_NET_POLL_CONTROLLER
3090 static void mtk_poll_controller(struct net_device *dev)
3092 struct mtk_mac *mac = netdev_priv(dev);
3093 struct mtk_eth *eth = mac->hw;
3095 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3096 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3097 mtk_handle_irq_rx(eth->irq[2], dev);
3098 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3099 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
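/* Set up the DMA rings and enable the TX/RX DMA engines (QDMA where supported, plus PDMA) */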
3103 static int mtk_start_dma(struct mtk_eth *eth)
3105 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3106 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3109 err = mtk_dma_init(eth);
3115 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3116 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3117 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3118 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3119 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3121 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3122 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3123 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3124 MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3126 val |= MTK_RX_BT_32DWORDS;
3127 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3130 MTK_RX_DMA_EN | rx_2b_offset |
3131 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3132 reg_map->pdma.glo_cfg);
3134 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3135 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3136 reg_map->pdma.glo_cfg);
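/* Program every GDM: set the forward target, enable RX checksum offload and the DSA special tag where needed, then reset and re-enable the PSE */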
3142 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3146 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3149 for (i = 0; i < MTK_MAC_COUNT; i++) {
3150 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3152 /* by default, set up the forward port to send frames to PDMA */
3155 /* Enable RX checksum */
3156 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3160 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3161 val |= MTK_GDMA_SPECIAL_TAG;
3163 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3165 /* Reset and enable PSE */
3166 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3167 mtk_w32(eth, 0, MTK_RST_GL);
3171 static bool mtk_uses_dsa(struct net_device *dev)
3173 #if IS_ENABLED(CONFIG_NET_DSA)
3174 return netdev_uses_dsa(dev) &&
3175 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
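/* Netdevice notifier: when a DSA user port behind this MAC changes link, propagate its negotiated speed to the matching QDMA TX queue */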
3181 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3183 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3184 struct mtk_eth *eth = mac->hw;
3185 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3186 struct ethtool_link_ksettings s;
3187 struct net_device *ldev;
3188 struct list_head *iter;
3189 struct dsa_port *dp;
3191 if (event != NETDEV_CHANGE)
3194 netdev_for_each_lower_dev(dev, ldev, iter) {
3195 if (netdev_priv(ldev) == mac)
3202 if (!dsa_slave_dev_check(dev))
3205 if (__ethtool_get_link_ksettings(dev, &s))
3208 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3211 dp = dsa_port_from_netdev(dev);
3212 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3215 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3220 static int mtk_open(struct net_device *dev)
3222 struct mtk_mac *mac = netdev_priv(dev);
3223 struct mtk_eth *eth = mac->hw;
3226 if (mtk_uses_dsa(dev) && !eth->prog) {
3227 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3228 struct metadata_dst *md_dst = eth->dsa_meta[i];
3233 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3238 md_dst->u.port_info.port_id = i;
3239 eth->dsa_meta[i] = md_dst;
3242 /* Hardware special tag parsing needs to be disabled if at least
3243 * one MAC does not use DSA. */
3245 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3246 val &= ~MTK_CDMP_STAG_EN;
3247 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3250 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3252 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3257 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3258 if (!refcount_read(ð->dma_refcnt)) {
3259 const struct mtk_soc_data *soc = eth->soc;
3263 err = mtk_start_dma(eth);
3265 phylink_disconnect_phy(mac->phylink);
3269 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3270 mtk_ppe_start(eth->ppe[i]);
3272 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3274 mtk_gdm_config(eth, gdm_config);
3276 napi_enable(ð->tx_napi);
3277 napi_enable(ð->rx_napi);
3278 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3279 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3280 refcount_set(ð->dma_refcnt, 1);
3283 refcount_inc(ð->dma_refcnt);
3285 phylink_start(mac->phylink);
3286 netif_tx_start_all_queues(dev);
3291 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3296 /* stop the dma engine */
3297 spin_lock_bh(ð->page_lock);
3298 val = mtk_r32(eth, glo_cfg);
3299 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3301 spin_unlock_bh(ð->page_lock);
3303 /* wait for dma stop */
3304 for (i = 0; i < 10; i++) {
3305 val = mtk_r32(eth, glo_cfg);
3306 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3314 static int mtk_stop(struct net_device *dev)
3316 struct mtk_mac *mac = netdev_priv(dev);
3317 struct mtk_eth *eth = mac->hw;
3320 phylink_stop(mac->phylink);
3322 netif_tx_disable(dev);
3324 phylink_disconnect_phy(mac->phylink);
3326 /* only shut down the DMA if this is the last user */
3327 if (!refcount_dec_and_test(ð->dma_refcnt))
3330 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3332 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3333 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3334 napi_disable(ð->tx_napi);
3335 napi_disable(ð->rx_napi);
3337 cancel_work_sync(ð->rx_dim.work);
3338 cancel_work_sync(ð->tx_dim.work);
3340 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3341 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3342 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3346 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3347 mtk_ppe_stop(eth->ppe[i]);
3352 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3353 struct netlink_ext_ack *extack)
3355 struct mtk_mac *mac = netdev_priv(dev);
3356 struct mtk_eth *eth = mac->hw;
3357 struct bpf_prog *old_prog;
3361 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3365 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3366 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3370 need_update = !!eth->prog != !!prog;
3371 if (netif_running(dev) && need_update)
3374 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3376 bpf_prog_put(old_prog);
3378 if (netif_running(dev) && need_update)
3379 return mtk_open(dev);
3384 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3386 switch (xdp->command) {
3387 case XDP_SETUP_PROG:
3388 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3394 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3396 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3400 usleep_range(1000, 1100);
3401 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3407 static void mtk_clk_disable(struct mtk_eth *eth)
3411 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3412 clk_disable_unprepare(eth->clks[clk]);
3415 static int mtk_clk_enable(struct mtk_eth *eth)
3419 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3420 ret = clk_prepare_enable(eth->clks[clk]);
3422 goto err_disable_clks;
3429 clk_disable_unprepare(eth->clks[clk]);
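/* Net DIM RX worker: translate the current moderation profile into PDMA (and QDMA, where present) delay-interrupt thresholds */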
3434 static void mtk_dim_rx(struct work_struct *work)
3436 struct dim *dim = container_of(work, struct dim, work);
3437 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3438 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3439 struct dim_cq_moder cur_profile;
3442 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3444 spin_lock_bh(ð->dim_lock);
3446 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3447 val &= MTK_PDMA_DELAY_TX_MASK;
3448 val |= MTK_PDMA_DELAY_RX_EN;
3450 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3451 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3453 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3454 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3456 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3457 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3458 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3460 spin_unlock_bh(ð->dim_lock);
3462 dim->state = DIM_START_MEASURE;
3465 static void mtk_dim_tx(struct work_struct *work)
3467 struct dim *dim = container_of(work, struct dim, work);
3468 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3469 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3470 struct dim_cq_moder cur_profile;
3473 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3475 spin_lock_bh(ð->dim_lock);
3477 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3478 val &= MTK_PDMA_DELAY_RX_MASK;
3479 val |= MTK_PDMA_DELAY_TX_EN;
3481 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3482 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3484 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3485 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3487 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3488 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3489 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3491 spin_unlock_bh(ð->dim_lock);
3493 dim->state = DIM_START_MEASURE;
3496 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3498 struct mtk_eth *eth = mac->hw;
3499 u32 mcr_cur, mcr_new;
3501 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3504 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3505 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3508 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3509 else if (val <= 1536)
3510 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3511 else if (val <= 1552)
3512 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3514 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3516 if (mcr_new != mcr_cur)
3517 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
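/* Cold reset: assert and release the ETH/FE/PPE reset bits through the ethsys reset controller */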
3520 static void mtk_hw_reset(struct mtk_eth *eth)
3524 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3525 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3526 val = RSTCTRL_PPE0_V2;
3531 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3532 val |= RSTCTRL_PPE1;
3534 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3536 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3537 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3541 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3545 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3549 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3553 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3555 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3556 val & RSTCTRL_FE, 1, 1000)) {
3557 dev_err(eth->dev, "warm reset failed\n");
3562 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3563 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3565 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3567 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3568 rst_mask |= RSTCTRL_PPE1;
3570 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3573 val = mtk_hw_reset_read(eth);
3574 if (!(val & rst_mask))
3575 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3578 rst_mask |= RSTCTRL_FE;
3579 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3582 val = mtk_hw_reset_read(eth);
3584 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
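/* Heuristic DMA hang detection: sample WDMA, QDMA and ADMA state; report a hang once a block looks stuck in three consecutive samples */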
3588 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3590 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3591 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3592 bool oq_hang, cdm1_busy, adma_busy;
3593 bool wtx_busy, cdm_full, oq_free;
3594 u32 wdidx, val, gdm1_fc, gdm2_fc;
3595 bool qfsm_hang, qfwd_hang;
3598 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3601 /* WDMA sanity checks */
3602 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3604 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3605 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3607 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3608 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3610 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3611 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3612 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3614 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3615 if (++eth->reset.wdma_hang_count > 2) {
3616 eth->reset.wdma_hang_count = 0;
3622 /* QDMA sanity checks */
3623 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3624 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3626 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3627 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3628 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3629 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3630 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3631 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3633 if (qfsm_hang && qfwd_hang &&
3634 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3635 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3636 if (++eth->reset.qdma_hang_count > 2) {
3637 eth->reset.qdma_hang_count = 0;
3643 /* ADMA sanity checks */
3644 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3645 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3646 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3647 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3649 if (oq_hang && cdm1_busy && adma_busy) {
3650 if (++eth->reset.adma_hang_count > 2) {
3651 eth->reset.adma_hang_count = 0;
3657 eth->reset.wdma_hang_count = 0;
3658 eth->reset.qdma_hang_count = 0;
3659 eth->reset.adma_hang_count = 0;
3661 eth->reset.wdidx = wdidx;
3666 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3668 struct delayed_work *del_work = to_delayed_work(work);
3669 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3670 reset.monitor_work);
3672 if (test_bit(MTK_RESETTING, ð->state))
3675 /* DMA stuck checks */
3676 if (mtk_hw_check_dma_hang(eth))
3677 schedule_work(ð->pending_work);
3680 schedule_delayed_work(ð->reset.monitor_work,
3681 MTK_DMA_MONITOR_TIMEOUT);
3684 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3686 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3687 ETHSYS_DMA_AG_MAP_PPE;
3688 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3691 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
3695 pm_runtime_enable(eth->dev);
3696 pm_runtime_get_sync(eth->dev);
3698 ret = mtk_clk_enable(eth);
3700 goto err_disable_pm;
3704 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3705 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3707 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3708 ret = device_reset(eth->dev);
3710 dev_err(eth->dev, "MAC reset failed!\n");
3711 goto err_disable_pm;
3714 /* set interrupt delays based on current Net DIM sample */
3715 mtk_dim_rx(ð->rx_dim.work);
3716 mtk_dim_tx(ð->tx_dim.work);
3718 /* disable delay and normal interrupt */
3719 mtk_tx_irq_disable(eth, ~0);
3720 mtk_rx_irq_disable(eth, ~0);
3728 mtk_hw_warm_reset(eth);
3732 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3733 /* Set FE to PDMAv2 if necessary */
3734 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3735 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3739 /* Set GE2 driving and slew rate */
3740 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3743 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3746 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3749 /* Set link-down as the default for each GMAC. Its own MCR will be set
3750 * up with the appropriate value when mtk_mac_config() is invoked. */
3753 for (i = 0; i < MTK_MAC_COUNT; i++) {
3754 struct net_device *dev = eth->netdev[i];
3756 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3758 struct mtk_mac *mac = netdev_priv(dev);
3760 mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3764 /* Tell the CDM to parse the MTK special tag on packets coming from the CPU;
3765 * this also works for untagged packets. */
3767 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3768 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3769 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3770 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3771 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3774 /* Enable RX VLAN offloading */
3775 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3777 /* set interrupt delays based on current Net DIM sample */
3778 mtk_dim_rx(ð->rx_dim.work);
3779 mtk_dim_tx(ð->tx_dim.work);
3781 /* disable delay and normal interrupt */
3782 mtk_tx_irq_disable(eth, ~0);
3783 mtk_rx_irq_disable(eth, ~0);
3785 /* FE int grouping */
3786 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3787 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3788 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3789 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3790 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3792 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3793 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3794 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3796 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3797 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3799 /* PSE Free Queue Flow Control */
3800 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3802 /* PSE config input queue threshold */
3803 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3804 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3805 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3806 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3807 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3808 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3809 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3810 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3812 /* PSE config output queue threshold */
3813 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3814 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3815 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3816 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3817 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3818 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3819 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3820 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3822 /* GDM and CDM Threshold */
3823 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3824 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3825 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3826 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3827 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3828 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3835 pm_runtime_put_sync(eth->dev);
3836 pm_runtime_disable(eth->dev);
3842 static int mtk_hw_deinit(struct mtk_eth *eth)
3844 if (!test_and_clear_bit(MTK_HW_INIT, ð->state))
3847 mtk_clk_disable(eth);
3849 pm_runtime_put_sync(eth->dev);
3850 pm_runtime_disable(eth->dev);
3855 static int __init mtk_init(struct net_device *dev)
3857 struct mtk_mac *mac = netdev_priv(dev);
3858 struct mtk_eth *eth = mac->hw;
3861 ret = of_get_ethdev_address(mac->of_node, dev);
3863 /* If the mac address is invalid, use random mac address */
3864 eth_hw_addr_random(dev);
3865 dev_err(eth->dev, "generated random MAC address %pM\n",
3872 static void mtk_uninit(struct net_device *dev)
3874 struct mtk_mac *mac = netdev_priv(dev);
3875 struct mtk_eth *eth = mac->hw;
3877 phylink_disconnect_phy(mac->phylink);
3878 mtk_tx_irq_disable(eth, ~0);
3879 mtk_rx_irq_disable(eth, ~0);
3882 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3884 int length = new_mtu + MTK_RX_ETH_HLEN;
3885 struct mtk_mac *mac = netdev_priv(dev);
3886 struct mtk_eth *eth = mac->hw;
3888 if (rcu_access_pointer(eth->prog) &&
3889 length > MTK_PP_MAX_BUF_SIZE) {
3890 netdev_err(dev, "Invalid MTU for XDP mode\n");
3894 mtk_set_mcr_max_rx(mac, length);
3900 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3902 struct mtk_mac *mac = netdev_priv(dev);
3908 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
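/* Quiesce the hardware before an FE reset: force down FE ports 3/4, prepare the PPEs, mask NETSYS interrupts and clear the forced-link bits on both GMACs */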
3916 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3921 /* disable FE P3 and P4 */
3922 val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3923 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3924 val |= MTK_FE_LINK_DOWN_P4;
3925 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3927 /* adjust PPE configurations to prepare for reset */
3928 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3929 mtk_ppe_prepare_reset(eth->ppe[i]);
3931 /* disable NETSYS interrupts */
3932 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3934 /* force link down GMAC */
3935 for (i = 0; i < 2; i++) {
3936 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3937 mtk_w32(eth, val, MTK_MAC_MCR(i));
3941 static void mtk_pending_work(struct work_struct *work)
3943 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3944 unsigned long restart = 0;
3949 set_bit(MTK_RESETTING, ð->state);
3951 mtk_prepare_for_reset(eth);
3953 /* Run the reset preliminary configuration again to avoid any possible
3954 * race during the FE reset, since it can run with the RTNL lock released. */
3956 mtk_prepare_for_reset(eth);
3958 /* stop all devices to make sure that dma is properly shut down */
3959 for (i = 0; i < MTK_MAC_COUNT; i++) {
3960 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3963 mtk_stop(eth->netdev[i]);
3964 __set_bit(i, &restart);
3967 usleep_range(15000, 16000);
3970 pinctrl_select_state(eth->dev->pins->p,
3971 eth->dev->pins->default_state);
3972 mtk_hw_init(eth, true);
3974 /* restart DMA and enable IRQs */
3975 for (i = 0; i < MTK_MAC_COUNT; i++) {
3976 if (!test_bit(i, &restart))
3979 if (mtk_open(eth->netdev[i])) {
3980 netif_alert(eth, ifup, eth->netdev[i],
3981 "Driver up/down cycle failed\n");
3982 dev_close(eth->netdev[i]);
3986 /* enable FE P3 and P4 */
3987 val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
3988 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3989 val &= ~MTK_FE_LINK_DOWN_P4;
3990 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3992 clear_bit(MTK_RESETTING, ð->state);
3994 mtk_wed_fe_reset_complete();
3999 static int mtk_free_dev(struct mtk_eth *eth)
4003 for (i = 0; i < MTK_MAC_COUNT; i++) {
4004 if (!eth->netdev[i])
4006 free_netdev(eth->netdev[i]);
4009 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4010 if (!eth->dsa_meta[i])
4012 metadata_dst_free(eth->dsa_meta[i]);
4018 static int mtk_unreg_dev(struct mtk_eth *eth)
4022 for (i = 0; i < MTK_MAC_COUNT; i++) {
4023 struct mtk_mac *mac;
4024 if (!eth->netdev[i])
4026 mac = netdev_priv(eth->netdev[i]);
4027 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4028 unregister_netdevice_notifier(&mac->device_notifier);
4029 unregister_netdev(eth->netdev[i]);
4035 static int mtk_cleanup(struct mtk_eth *eth)
4039 cancel_work_sync(ð->pending_work);
4040 cancel_delayed_work_sync(ð->reset.monitor_work);
4045 static int mtk_get_link_ksettings(struct net_device *ndev,
4046 struct ethtool_link_ksettings *cmd)
4048 struct mtk_mac *mac = netdev_priv(ndev);
4050 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4053 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4056 static int mtk_set_link_ksettings(struct net_device *ndev,
4057 const struct ethtool_link_ksettings *cmd)
4059 struct mtk_mac *mac = netdev_priv(ndev);
4061 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4064 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4067 static void mtk_get_drvinfo(struct net_device *dev,
4068 struct ethtool_drvinfo *info)
4070 struct mtk_mac *mac = netdev_priv(dev);
4072 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4073 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4074 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4077 static u32 mtk_get_msglevel(struct net_device *dev)
4079 struct mtk_mac *mac = netdev_priv(dev);
4081 return mac->hw->msg_enable;
4084 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4086 struct mtk_mac *mac = netdev_priv(dev);
4088 mac->hw->msg_enable = value;
4091 static int mtk_nway_reset(struct net_device *dev)
4093 struct mtk_mac *mac = netdev_priv(dev);
4095 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4101 return phylink_ethtool_nway_reset(mac->phylink);
4104 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4108 switch (stringset) {
4109 case ETH_SS_STATS: {
4110 struct mtk_mac *mac = netdev_priv(dev);
4112 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4113 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4114 data += ETH_GSTRING_LEN;
4116 if (mtk_page_pool_enabled(mac->hw))
4117 page_pool_ethtool_stats_get_strings(data);
4125 static int mtk_get_sset_count(struct net_device *dev, int sset)
4128 case ETH_SS_STATS: {
4129 int count = ARRAY_SIZE(mtk_ethtool_stats);
4130 struct mtk_mac *mac = netdev_priv(dev);
4132 if (mtk_page_pool_enabled(mac->hw))
4133 count += page_pool_ethtool_stats_get_count();
4141 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4143 struct page_pool_stats stats = {};
4146 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4147 struct mtk_rx_ring *ring = ð->rx_ring[i];
4149 if (!ring->page_pool)
4152 page_pool_get_stats(ring->page_pool, &stats);
4154 page_pool_ethtool_stats_get(data, &stats);
4157 static void mtk_get_ethtool_stats(struct net_device *dev,
4158 struct ethtool_stats *stats, u64 *data)
4160 struct mtk_mac *mac = netdev_priv(dev);
4161 struct mtk_hw_stats *hwstats = mac->hw_stats;
4162 u64 *data_src, *data_dst;
4166 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4169 if (netif_running(dev) && netif_device_present(dev)) {
4170 if (spin_trylock_bh(&hwstats->stats_lock)) {
4171 mtk_stats_update_mac(mac);
4172 spin_unlock_bh(&hwstats->stats_lock);
4176 data_src = (u64 *)hwstats;
4180 start = u64_stats_fetch_begin(&hwstats->syncp);
4182 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4183 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4184 if (mtk_page_pool_enabled(mac->hw))
4185 mtk_ethtool_pp_stats(mac->hw, data_dst);
4186 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4189 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4192 int ret = -EOPNOTSUPP;
4195 case ETHTOOL_GRXRINGS:
4196 if (dev->hw_features & NETIF_F_LRO) {
4197 cmd->data = MTK_MAX_RX_RING_NUM;
4201 case ETHTOOL_GRXCLSRLCNT:
4202 if (dev->hw_features & NETIF_F_LRO) {
4203 struct mtk_mac *mac = netdev_priv(dev);
4205 cmd->rule_cnt = mac->hwlro_ip_cnt;
4209 case ETHTOOL_GRXCLSRULE:
4210 if (dev->hw_features & NETIF_F_LRO)
4211 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4213 case ETHTOOL_GRXCLSRLALL:
4214 if (dev->hw_features & NETIF_F_LRO)
4215 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4225 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4227 int ret = -EOPNOTSUPP;
4230 case ETHTOOL_SRXCLSRLINS:
4231 if (dev->hw_features & NETIF_F_LRO)
4232 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4234 case ETHTOOL_SRXCLSRLDEL:
4235 if (dev->hw_features & NETIF_F_LRO)
4236 ret = mtk_hwlro_del_ipaddr(dev, cmd);
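/* Select the TX queue: traffic from DSA user ports maps to a per-port queue (mapping + 3); everything else uses queue 0 */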
4245 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4246 struct net_device *sb_dev)
4248 struct mtk_mac *mac = netdev_priv(dev);
4249 unsigned int queue = 0;
4251 if (netdev_uses_dsa(dev))
4252 queue = skb_get_queue_mapping(skb) + 3;
4256 if (queue >= dev->num_tx_queues)
4262 static const struct ethtool_ops mtk_ethtool_ops = {
4263 .get_link_ksettings = mtk_get_link_ksettings,
4264 .set_link_ksettings = mtk_set_link_ksettings,
4265 .get_drvinfo = mtk_get_drvinfo,
4266 .get_msglevel = mtk_get_msglevel,
4267 .set_msglevel = mtk_set_msglevel,
4268 .nway_reset = mtk_nway_reset,
4269 .get_link = ethtool_op_get_link,
4270 .get_strings = mtk_get_strings,
4271 .get_sset_count = mtk_get_sset_count,
4272 .get_ethtool_stats = mtk_get_ethtool_stats,
4273 .get_rxnfc = mtk_get_rxnfc,
4274 .set_rxnfc = mtk_set_rxnfc,
4277 static const struct net_device_ops mtk_netdev_ops = {
4278 .ndo_init = mtk_init,
4279 .ndo_uninit = mtk_uninit,
4280 .ndo_open = mtk_open,
4281 .ndo_stop = mtk_stop,
4282 .ndo_start_xmit = mtk_start_xmit,
4283 .ndo_set_mac_address = mtk_set_mac_address,
4284 .ndo_validate_addr = eth_validate_addr,
4285 .ndo_eth_ioctl = mtk_do_ioctl,
4286 .ndo_change_mtu = mtk_change_mtu,
4287 .ndo_tx_timeout = mtk_tx_timeout,
4288 .ndo_get_stats64 = mtk_get_stats64,
4289 .ndo_fix_features = mtk_fix_features,
4290 .ndo_set_features = mtk_set_features,
4291 #ifdef CONFIG_NET_POLL_CONTROLLER
4292 .ndo_poll_controller = mtk_poll_controller,
4294 .ndo_setup_tc = mtk_eth_setup_tc,
4296 .ndo_xdp_xmit = mtk_xdp_xmit,
4297 .ndo_select_queue = mtk_select_queue,
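/* Instantiate one MAC from a "mediatek,eth-mac" node: allocate the netdev and stats, set up phylink and the supported interface modes, and wire up features */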
4300 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4302 const __be32 *_id = of_get_property(np, "reg", NULL);
4303 phy_interface_t phy_mode;
4304 struct phylink *phylink;
4305 struct mtk_mac *mac;
4311 dev_err(eth->dev, "missing mac id\n");
4315 id = be32_to_cpup(_id);
4316 if (id >= MTK_MAC_COUNT) {
4317 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4321 if (eth->netdev[id]) {
4322 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4326 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4327 txqs = MTK_QDMA_NUM_QUEUES;
4329 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4330 if (!eth->netdev[id]) {
4331 dev_err(eth->dev, "alloc_etherdev failed\n");
4334 mac = netdev_priv(eth->netdev[id]);
4340 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4341 mac->hwlro_ip_cnt = 0;
4343 mac->hw_stats = devm_kzalloc(eth->dev,
4344 sizeof(*mac->hw_stats),
4346 if (!mac->hw_stats) {
4347 dev_err(eth->dev, "failed to allocate counter memory\n");
4351 spin_lock_init(&mac->hw_stats->stats_lock);
4352 u64_stats_init(&mac->hw_stats->syncp);
4353 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4355 /* phylink create */
4356 err = of_get_phy_mode(np, &phy_mode);
4358 dev_err(eth->dev, "incorrect phy-mode\n");
4362 /* mac config is not set */
4363 mac->interface = PHY_INTERFACE_MODE_NA;
4364 mac->speed = SPEED_UNKNOWN;
4366 mac->phylink_config.dev = ð->netdev[id]->dev;
4367 mac->phylink_config.type = PHYLINK_NETDEV;
4368 /* This driver makes use of state->speed in mac_config */
4369 mac->phylink_config.legacy_pre_march2020 = true;
4370 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4371 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4373 __set_bit(PHY_INTERFACE_MODE_MII,
4374 mac->phylink_config.supported_interfaces);
4375 __set_bit(PHY_INTERFACE_MODE_GMII,
4376 mac->phylink_config.supported_interfaces);
4378 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4379 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4381 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4382 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4383 mac->phylink_config.supported_interfaces);
4385 /* TRGMII is not permitted on MT7621 if using DDR2 */
4386 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4387 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4388 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4389 if (val & SYSCFG_DRAM_TYPE_DDR2)
4390 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4391 mac->phylink_config.supported_interfaces);
4394 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4395 __set_bit(PHY_INTERFACE_MODE_SGMII,
4396 mac->phylink_config.supported_interfaces);
4397 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4398 mac->phylink_config.supported_interfaces);
4399 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4400 mac->phylink_config.supported_interfaces);
4403 phylink = phylink_create(&mac->phylink_config,
4404 of_fwnode_handle(mac->of_node),
4405 phy_mode, &mtk_phylink_ops);
4406 if (IS_ERR(phylink)) {
4407 err = PTR_ERR(phylink);
4411 mac->phylink = phylink;
4413 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4414 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4415 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4416 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4418 eth->netdev[id]->hw_features = eth->soc->hw_features;
4420 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4422 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4423 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4424 eth->netdev[id]->features |= eth->soc->hw_features;
4425 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4427 eth->netdev[id]->irq = eth->irq[0];
4428 eth->netdev[id]->dev.of_node = np;
4430 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4431 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4433 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4435 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4436 mac->device_notifier.notifier_call = mtk_device_event;
4437 register_netdevice_notifier(&mac->device_notifier);
4440 if (mtk_page_pool_enabled(eth))
4441 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4442 NETDEV_XDP_ACT_REDIRECT |
4443 NETDEV_XDP_ACT_NDO_XMIT |
4444 NETDEV_XDP_ACT_NDO_XMIT_SG;
4449 free_netdev(eth->netdev[id]);
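/* Switch the device used for DMA mappings; running netdevs are closed beforehand and reopened afterwards */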
4453 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4455 struct net_device *dev, *tmp;
4456 LIST_HEAD(dev_list);
4461 for (i = 0; i < MTK_MAC_COUNT; i++) {
4462 dev = eth->netdev[i];
4464 if (!dev || !(dev->flags & IFF_UP))
4467 list_add_tail(&dev->close_list, &dev_list);
4470 dev_close_many(&dev_list, false);
4472 eth->dma_dev = dma_dev;
4474 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4475 list_del_init(&dev->close_list);
4476 dev_open(dev, NULL);
4482 static int mtk_probe(struct platform_device *pdev)
4484 struct resource *res = NULL;
4485 struct device_node *mac_np;
4486 struct mtk_eth *eth;
4489 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4493 eth->soc = of_device_get_match_data(&pdev->dev);
4495 eth->dev = &pdev->dev;
4496 eth->dma_dev = &pdev->dev;
4497 eth->base = devm_platform_ioremap_resource(pdev, 0);
4498 if (IS_ERR(eth->base))
4499 return PTR_ERR(eth->base);
4501 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4502 eth->ip_align = NET_IP_ALIGN;
4504 spin_lock_init(ð->page_lock);
4505 spin_lock_init(ð->tx_irq_lock);
4506 spin_lock_init(ð->rx_irq_lock);
4507 spin_lock_init(ð->dim_lock);
4509 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4510 INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
4511 INIT_DELAYED_WORK(ð->reset.monitor_work, mtk_hw_reset_monitor_work);
4513 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4514 INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
4516 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4517 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4519 if (IS_ERR(eth->ethsys)) {
4520 dev_err(&pdev->dev, "no ethsys regmap found\n");
4521 return PTR_ERR(eth->ethsys);
4525 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4526 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4527 "mediatek,infracfg");
4528 if (IS_ERR(eth->infra)) {
4529 dev_err(&pdev->dev, "no infracfg regmap found\n");
4530 return PTR_ERR(eth->infra);
4534 if (of_dma_is_coherent(pdev->dev.of_node)) {
4537 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4538 "cci-control-port");
4539 /* enable CPU/bus coherency */
4541 regmap_write(cci, 0, 3);
4544 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4545 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4550 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4551 eth->soc->ana_rgc3);
4557 if (eth->soc->required_pctl) {
4558 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4560 if (IS_ERR(eth->pctl)) {
4561 dev_err(&pdev->dev, "no pctl regmap found\n");
4562 return PTR_ERR(eth->pctl);
4566 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4567 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4572 if (eth->soc->offload_version) {
4574 struct device_node *np;
4575 phys_addr_t wdma_phy;
4578 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4581 np = of_parse_phandle(pdev->dev.of_node,
4586 wdma_base = eth->soc->reg_map->wdma_base[i];
4587 wdma_phy = res ? res->start + wdma_base : 0;
4588 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4593 for (i = 0; i < 3; i++) {
4594 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4595 eth->irq[i] = eth->irq[0];
4597 eth->irq[i] = platform_get_irq(pdev, i);
4598 if (eth->irq[i] < 0) {
4599 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4604 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4605 eth->clks[i] = devm_clk_get(eth->dev,
4606 mtk_clks_source_name[i]);
4607 if (IS_ERR(eth->clks[i])) {
4608 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4609 err = -EPROBE_DEFER;
4612 if (eth->soc->required_clks & BIT(i)) {
4613 dev_err(&pdev->dev, "clock %s not found\n",
4614 mtk_clks_source_name[i]);
4618 eth->clks[i] = NULL;
4622 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4623 INIT_WORK(ð->pending_work, mtk_pending_work);
4625 err = mtk_hw_init(eth, false);
4629 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4631 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4632 if (!of_device_is_compatible(mac_np,
4633 "mediatek,eth-mac"))
4636 if (!of_device_is_available(mac_np))
4639 err = mtk_add_mac(eth, mac_np);
4641 of_node_put(mac_np);
4646 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4647 err = devm_request_irq(eth->dev, eth->irq[0],
4649 dev_name(eth->dev), eth);
4651 err = devm_request_irq(eth->dev, eth->irq[1],
4652 mtk_handle_irq_tx, 0,
4653 dev_name(eth->dev), eth);
4657 err = devm_request_irq(eth->dev, eth->irq[2],
4658 mtk_handle_irq_rx, 0,
4659 dev_name(eth->dev), eth);
4664 /* No MT7628/88 support yet */
4665 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4666 err = mtk_mdio_init(eth);
4671 if (eth->soc->offload_version) {
4674 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4675 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4676 for (i = 0; i < num_ppe; i++) {
4677 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4679 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4680 eth->soc->offload_version, i);
4683 goto err_deinit_ppe;
4687 err = mtk_eth_offload_init(eth);
4689 goto err_deinit_ppe;
4692 for (i = 0; i < MTK_MAX_DEVS; i++) {
4693 if (!eth->netdev[i])
4696 err = register_netdev(eth->netdev[i]);
4698 dev_err(eth->dev, "error bringing up device\n");
4699 goto err_deinit_ppe;
4701 netif_info(eth, probe, eth->netdev[i],
4702 "mediatek frame engine at 0x%08lx, irq %d\n",
4703 eth->netdev[i]->base_addr, eth->irq[0]);
4706 /* we run 2 devices on the same DMA ring so we need a dummy device for NAPI to work */
4709 init_dummy_netdev(ð->dummy_dev);
4710 netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx);
4711 netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx);
4713 platform_set_drvdata(pdev, eth);
4714 schedule_delayed_work(ð->reset.monitor_work,
4715 MTK_DMA_MONITOR_TIMEOUT);
4720 mtk_ppe_deinit(eth);
4721 mtk_mdio_cleanup(eth);
4732 static int mtk_remove(struct platform_device *pdev)
4734 struct mtk_eth *eth = platform_get_drvdata(pdev);
4735 struct mtk_mac *mac;
4738 /* stop all devices to make sure that dma is properly shut down */
4739 for (i = 0; i < MTK_MAC_COUNT; i++) {
4740 if (!eth->netdev[i])
4742 mtk_stop(eth->netdev[i]);
4743 mac = netdev_priv(eth->netdev[i]);
4744 phylink_disconnect_phy(mac->phylink);
4750 netif_napi_del(ð->tx_napi);
4751 netif_napi_del(ð->rx_napi);
4753 mtk_mdio_cleanup(eth);
4758 static const struct mtk_soc_data mt2701_data = {
4759 .reg_map = &mtk_reg_map,
4760 .caps = MT7623_CAPS | MTK_HWLRO,
4761 .hw_features = MTK_HW_FEATURES,
4762 .required_clks = MT7623_CLKS_BITMAP,
4763 .required_pctl = true,
4765 .txd_size = sizeof(struct mtk_tx_dma),
4766 .rxd_size = sizeof(struct mtk_rx_dma),
4767 .rx_irq_done_mask = MTK_RX_DONE_INT,
4768 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4769 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4770 .dma_len_offset = 16,
4774 static const struct mtk_soc_data mt7621_data = {
4775 .reg_map = &mtk_reg_map,
4776 .caps = MT7621_CAPS,
4777 .hw_features = MTK_HW_FEATURES,
4778 .required_clks = MT7621_CLKS_BITMAP,
4779 .required_pctl = false,
4780 .offload_version = 1,
4782 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4784 .txd_size = sizeof(struct mtk_tx_dma),
4785 .rxd_size = sizeof(struct mtk_rx_dma),
4786 .rx_irq_done_mask = MTK_RX_DONE_INT,
4787 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4788 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4789 .dma_len_offset = 16,
4793 static const struct mtk_soc_data mt7622_data = {
4794 .reg_map = &mtk_reg_map,
4796 .caps = MT7622_CAPS | MTK_HWLRO,
4797 .hw_features = MTK_HW_FEATURES,
4798 .required_clks = MT7622_CLKS_BITMAP,
4799 .required_pctl = false,
4800 .offload_version = 2,
4802 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4804 .txd_size = sizeof(struct mtk_tx_dma),
4805 .rxd_size = sizeof(struct mtk_rx_dma),
4806 .rx_irq_done_mask = MTK_RX_DONE_INT,
4807 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4808 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4809 .dma_len_offset = 16,
4813 static const struct mtk_soc_data mt7623_data = {
4814 .reg_map = &mtk_reg_map,
4815 .caps = MT7623_CAPS | MTK_HWLRO,
4816 .hw_features = MTK_HW_FEATURES,
4817 .required_clks = MT7623_CLKS_BITMAP,
4818 .required_pctl = true,
4819 .offload_version = 1,
4821 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4823 .txd_size = sizeof(struct mtk_tx_dma),
4824 .rxd_size = sizeof(struct mtk_rx_dma),
4825 .rx_irq_done_mask = MTK_RX_DONE_INT,
4826 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4827 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4828 .dma_len_offset = 16,
4832 static const struct mtk_soc_data mt7629_data = {
4833 .reg_map = &mtk_reg_map,
4835 .caps = MT7629_CAPS | MTK_HWLRO,
4836 .hw_features = MTK_HW_FEATURES,
4837 .required_clks = MT7629_CLKS_BITMAP,
4838 .required_pctl = false,
4840 .txd_size = sizeof(struct mtk_tx_dma),
4841 .rxd_size = sizeof(struct mtk_rx_dma),
4842 .rx_irq_done_mask = MTK_RX_DONE_INT,
4843 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4844 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4845 .dma_len_offset = 16,
4849 static const struct mtk_soc_data mt7986_data = {
4850 .reg_map = &mt7986_reg_map,
4852 .caps = MT7986_CAPS,
4853 .hw_features = MTK_HW_FEATURES,
4854 .required_clks = MT7986_CLKS_BITMAP,
4855 .required_pctl = false,
4856 .offload_version = 2,
4858 .foe_entry_size = sizeof(struct mtk_foe_entry),
4860 .txd_size = sizeof(struct mtk_tx_dma_v2),
4861 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4862 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4863 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4864 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4865 .dma_len_offset = 8,
4869 static const struct mtk_soc_data rt5350_data = {
4870 .reg_map = &mt7628_reg_map,
4871 .caps = MT7628_CAPS,
4872 .hw_features = MTK_HW_FEATURES_MT7628,
4873 .required_clks = MT7628_CLKS_BITMAP,
4874 .required_pctl = false,
4876 .txd_size = sizeof(struct mtk_tx_dma),
4877 .rxd_size = sizeof(struct mtk_rx_dma),
4878 .rx_irq_done_mask = MTK_RX_DONE_INT,
4879 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4880 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4881 .dma_len_offset = 16,
4885 const struct of_device_id of_mtk_match[] = {
4886 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4887 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4888 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4889 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4890 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4891 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4892 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4895 MODULE_DEVICE_TABLE(of, of_mtk_match);
4897 static struct platform_driver mtk_driver = {
4899 .remove = mtk_remove,
4901 .name = "mtk_soc_eth",
4902 .of_match_table = of_mtk_match,
4906 module_platform_driver(mtk_driver);
4908 MODULE_LICENSE("GPL");
4909 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4910 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");