1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
31 #include "mtk_eth_soc.h"
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
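/* Each ethtool stat entry records the offset of its counter inside
 * struct mtk_hw_stats, expressed in units of u64 words, so the string
 * table below can be walked alongside the 64-bit counter array.
 */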
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
45 static const struct mtk_reg_map mtk_reg_map = {
46 .tx_irq_mask = 0x1a1c,
47 .tx_irq_status = 0x1a18,
57 .adma_rx_dbg0 = 0x0a38,
70 .tx_sch_rate = 0x1a14,
83 .gdma_to_ppe = 0x4444,
93 static const struct mtk_reg_map mt7628_reg_map = {
94 .tx_irq_mask = 0x0a28,
95 .tx_irq_status = 0x0a20,
103 .irq_status = 0x0a20,
109 static const struct mtk_reg_map mt7986_reg_map = {
110 .tx_irq_mask = 0x461c,
111 .tx_irq_status = 0x4618,
114 .rx_cnt_cfg = 0x6104,
119 .irq_status = 0x6220,
121 .adma_rx_dbg0 = 0x6238,
128 .rx_cnt_cfg = 0x4504,
144 .tx_sch_rate = 0x4798,
147 .gdma_to_ppe = 0x3333,
153 .pse_iq_sta = 0x0180,
154 .pse_oq_sta = 0x01a0,
157 static const struct mtk_reg_map mt7988_reg_map = {
158 .tx_irq_mask = 0x461c,
159 .tx_irq_status = 0x4618,
162 .rx_cnt_cfg = 0x6904,
167 .irq_status = 0x6a20,
169 .adma_rx_dbg0 = 0x6a38,
176 .rx_cnt_cfg = 0x4504,
192 .tx_sch_rate = 0x4798,
195 .gdma_to_ppe = 0x3333,
202 .pse_iq_sta = 0x0180,
203 .pse_oq_sta = 0x01a0,
206 /* strings used by ethtool */
207 static const struct mtk_ethtool_stats {
208 char str[ETH_GSTRING_LEN];
210 } mtk_ethtool_stats[] = {
211 MTK_ETHTOOL_STAT(tx_bytes),
212 MTK_ETHTOOL_STAT(tx_packets),
213 MTK_ETHTOOL_STAT(tx_skip),
214 MTK_ETHTOOL_STAT(tx_collisions),
215 MTK_ETHTOOL_STAT(rx_bytes),
216 MTK_ETHTOOL_STAT(rx_packets),
217 MTK_ETHTOOL_STAT(rx_overflow),
218 MTK_ETHTOOL_STAT(rx_fcs_errors),
219 MTK_ETHTOOL_STAT(rx_short_errors),
220 MTK_ETHTOOL_STAT(rx_long_errors),
221 MTK_ETHTOOL_STAT(rx_checksum_errors),
222 MTK_ETHTOOL_STAT(rx_flow_control_packets),
223 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
224 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
225 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
226 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
227 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
228 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
229 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
232 static const char * const mtk_clks_source_name[] = {
267 "top_xfi_phy0_xtal_sel",
268 "top_xfi_phy1_xtal_sel",
270 "top_eth_refck_50m_sel",
271 "top_eth_sys_200m_sel",
276 "top_netsys_500m_sel",
277 "top_netsys_pao_2x_sel",
278 "top_netsys_sync_250m_sel",
279 "top_netsys_ppefb_250m_sel",
280 "top_netsys_warp_sel",
283 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
285 __raw_writel(val, eth->base + reg);
288 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
290 return __raw_readl(eth->base + reg);
293 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
297 val = mtk_r32(eth, reg);
300 mtk_w32(eth, val, reg);
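/* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS (busy) bit clears; log an
 * MDIO timeout and bail out with an error once PHY_IAC_TIMEOUT jiffies
 * have elapsed.
 */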
304 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
306 unsigned long t_start = jiffies;
309 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
311 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
316 dev_err(eth->dev, "mdio: MDIO timeout\n");
320 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
325 ret = mtk_mdio_busy_wait(eth);
329 mtk_w32(eth, PHY_IAC_ACCESS |
332 PHY_IAC_REG(phy_reg) |
333 PHY_IAC_ADDR(phy_addr) |
334 PHY_IAC_DATA(write_data),
337 ret = mtk_mdio_busy_wait(eth);
344 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
345 u32 devad, u32 phy_reg, u32 write_data)
349 ret = mtk_mdio_busy_wait(eth);
353 mtk_w32(eth, PHY_IAC_ACCESS |
355 PHY_IAC_CMD_C45_ADDR |
357 PHY_IAC_ADDR(phy_addr) |
358 PHY_IAC_DATA(phy_reg),
361 ret = mtk_mdio_busy_wait(eth);
365 mtk_w32(eth, PHY_IAC_ACCESS |
369 PHY_IAC_ADDR(phy_addr) |
370 PHY_IAC_DATA(write_data),
373 ret = mtk_mdio_busy_wait(eth);
380 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
384 ret = mtk_mdio_busy_wait(eth);
388 mtk_w32(eth, PHY_IAC_ACCESS |
390 PHY_IAC_CMD_C22_READ |
391 PHY_IAC_REG(phy_reg) |
392 PHY_IAC_ADDR(phy_addr),
395 ret = mtk_mdio_busy_wait(eth);
399 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
402 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
403 u32 devad, u32 phy_reg)
407 ret = mtk_mdio_busy_wait(eth);
411 mtk_w32(eth, PHY_IAC_ACCESS |
413 PHY_IAC_CMD_C45_ADDR |
415 PHY_IAC_ADDR(phy_addr) |
416 PHY_IAC_DATA(phy_reg),
419 ret = mtk_mdio_busy_wait(eth);
423 mtk_w32(eth, PHY_IAC_ACCESS |
425 PHY_IAC_CMD_C45_READ |
427 PHY_IAC_ADDR(phy_addr),
430 ret = mtk_mdio_busy_wait(eth);
434 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
437 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
438 int phy_reg, u16 val)
440 struct mtk_eth *eth = bus->priv;
442 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
445 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
446 int devad, int phy_reg, u16 val)
448 struct mtk_eth *eth = bus->priv;
450 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
453 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
455 struct mtk_eth *eth = bus->priv;
457 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
460 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
463 struct mtk_eth *eth = bus->priv;
465 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
468 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
469 phy_interface_t interface)
473 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
474 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
476 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
477 ETHSYS_TRGMII_MT7621_MASK, val);
482 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
483 phy_interface_t interface)
487 if (interface == PHY_INTERFACE_MODE_TRGMII) {
488 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
489 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
491 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
495 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
498 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
500 /* Force Port1 XGMAC Link Up */
501 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
502 MTK_XGMAC_STS(MTK_GMAC1_ID));
504 /* Adjust GSW bridge IPG to 11 */
505 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
506 (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
507 (GSW_IPG_11 << GSWRX_IPG_SHIFT),
511 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
512 phy_interface_t interface)
514 struct mtk_mac *mac = container_of(config, struct mtk_mac,
516 struct mtk_eth *eth = mac->hw;
519 if (interface == PHY_INTERFACE_MODE_SGMII ||
520 phy_interface_mode_is_8023z(interface)) {
521 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
524 return eth->sgmii_pcs[sid];
530 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
531 const struct phylink_link_state *state)
533 struct mtk_mac *mac = container_of(config, struct mtk_mac,
535 struct mtk_eth *eth = mac->hw;
536 int val, ge_mode, err = 0;
539 /* MT76x8 has no hardware settings for the MAC */
540 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
541 mac->interface != state->interface) {
542 /* Setup soc pin functions */
543 switch (state->interface) {
544 case PHY_INTERFACE_MODE_TRGMII:
545 case PHY_INTERFACE_MODE_RGMII_TXID:
546 case PHY_INTERFACE_MODE_RGMII_RXID:
547 case PHY_INTERFACE_MODE_RGMII_ID:
548 case PHY_INTERFACE_MODE_RGMII:
549 case PHY_INTERFACE_MODE_MII:
550 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
551 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
556 case PHY_INTERFACE_MODE_1000BASEX:
557 case PHY_INTERFACE_MODE_2500BASEX:
558 case PHY_INTERFACE_MODE_SGMII:
559 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
563 case PHY_INTERFACE_MODE_GMII:
564 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
565 err = mtk_gmac_gephy_path_setup(eth, mac->id);
570 case PHY_INTERFACE_MODE_INTERNAL:
576 /* Setup clock for 1st gmac */
577 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
578 !phy_interface_mode_is_8023z(state->interface) &&
579 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
580 if (MTK_HAS_CAPS(mac->hw->soc->caps,
581 MTK_TRGMII_MT7621_CLK)) {
582 if (mt7621_gmac0_rgmii_adjust(mac->hw,
586 mtk_gmac0_rgmii_adjust(mac->hw,
589 /* mt7623_pad_clk_setup */
590 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
592 TD_DM_DRVP(8) | TD_DM_DRVN(8),
595 /* Assert/release MT7623 RXC reset */
596 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
598 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
602 switch (state->interface) {
603 case PHY_INTERFACE_MODE_MII:
604 case PHY_INTERFACE_MODE_GMII:
612 /* put the gmac into the right mode */
613 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
614 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
615 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
616 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
618 mac->interface = state->interface;
622 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
623 phy_interface_mode_is_8023z(state->interface)) {
624 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
627 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
629 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
631 ~(u32)SYSCFG0_SGMII_MASK);
633 /* Save the syscfg0 value for mac_finish */
635 } else if (phylink_autoneg_inband(mode)) {
637 "In-band mode not supported in non SGMII mode!\n");
642 if (mtk_is_netsys_v3_or_greater(eth) &&
643 mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
644 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
645 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
647 mtk_setup_bridge_switch(eth);
653 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
654 mac->id, phy_modes(state->interface));
658 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
659 mac->id, phy_modes(state->interface), err);
662 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
663 phy_interface_t interface)
665 struct mtk_mac *mac = container_of(config, struct mtk_mac,
667 struct mtk_eth *eth = mac->hw;
668 u32 mcr_cur, mcr_new;
671 if (interface == PHY_INTERFACE_MODE_SGMII ||
672 phy_interface_mode_is_8023z(interface))
673 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
674 SYSCFG0_SGMII_MASK, mac->syscfg0);
677 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
679 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
680 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
681 MAC_MCR_RX_FIFO_CLR_DIS;
683 /* Only update control register when needed! */
684 if (mcr_new != mcr_cur)
685 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
690 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
691 phy_interface_t interface)
693 struct mtk_mac *mac = container_of(config, struct mtk_mac,
695 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
697 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
698 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
701 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
704 const struct mtk_soc_data *soc = eth->soc;
707 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
710 val = MTK_QTX_SCH_MIN_RATE_EN |
711 /* minimum: 10 Mbps */
712 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
713 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
714 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
715 if (mtk_is_netsys_v1(eth))
716 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
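/* The scheduler rate fields appear to encode the rate as
 * mantissa * 10^exponent in kbit/s: the 10 Mbit/s minimum above is
 * MAN=1, EXP=4, i.e. 1 * 10^4 kbit/s. The per-speed maximum rates
 * below follow the same scheme, e.g. MAN=103, EXP=3 is ~103 Mbit/s
 * for a 100 Mbit/s link.
 */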
718 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
721 val |= MTK_QTX_SCH_MAX_RATE_EN |
722 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
723 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
724 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
727 val |= MTK_QTX_SCH_MAX_RATE_EN |
728 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
729 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
730 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
733 val |= MTK_QTX_SCH_MAX_RATE_EN |
734 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
735 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
736 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
744 val |= MTK_QTX_SCH_MAX_RATE_EN |
745 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
746 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
747 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
750 val |= MTK_QTX_SCH_MAX_RATE_EN |
751 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
752 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
753 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
756 val |= MTK_QTX_SCH_MAX_RATE_EN |
757 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
758 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
759 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
766 ofs = MTK_QTX_OFFSET * idx;
767 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
770 static void mtk_mac_link_up(struct phylink_config *config,
771 struct phy_device *phy,
772 unsigned int mode, phy_interface_t interface,
773 int speed, int duplex, bool tx_pause, bool rx_pause)
775 struct mtk_mac *mac = container_of(config, struct mtk_mac,
779 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
780 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
781 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
782 MAC_MCR_FORCE_RX_FC);
784 /* Configure speed */
789 mcr |= MAC_MCR_SPEED_1000;
792 mcr |= MAC_MCR_SPEED_100;
796 /* Configure duplex */
797 if (duplex == DUPLEX_FULL)
798 mcr |= MAC_MCR_FORCE_DPX;
800 /* Configure pause modes - phylink will avoid these for half duplex */
802 mcr |= MAC_MCR_FORCE_TX_FC;
804 mcr |= MAC_MCR_FORCE_RX_FC;
806 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
807 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
810 static const struct phylink_mac_ops mtk_phylink_ops = {
811 .mac_select_pcs = mtk_mac_select_pcs,
812 .mac_config = mtk_mac_config,
813 .mac_finish = mtk_mac_finish,
814 .mac_link_down = mtk_mac_link_down,
815 .mac_link_up = mtk_mac_link_up,
818 static int mtk_mdio_init(struct mtk_eth *eth)
820 unsigned int max_clk = 2500000, divider;
821 struct device_node *mii_np;
825 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
827 dev_err(eth->dev, "no %s child node found", "mdio-bus");
831 if (!of_device_is_available(mii_np)) {
836 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
842 eth->mii_bus->name = "mdio";
843 eth->mii_bus->read = mtk_mdio_read_c22;
844 eth->mii_bus->write = mtk_mdio_write_c22;
845 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
846 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
847 eth->mii_bus->priv = eth;
848 eth->mii_bus->parent = eth->dev;
850 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
852 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
853 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
854 dev_err(eth->dev, "MDIO clock frequency out of range");
860 divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
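/* Pick the smallest divider that keeps MDC_MAX_FREQ / divider at or
 * below the requested (or default 2.5 MHz) MDC rate, clamped to 63 -
 * presumably the largest value the PPSC_MDC_CFG field can hold.
 */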
862 /* Configure MDC Turbo Mode */
863 if (mtk_is_netsys_v3_or_greater(eth))
864 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
866 /* Configure MDC Divider */
867 val = FIELD_PREP(PPSC_MDC_CFG, divider);
868 if (!mtk_is_netsys_v3_or_greater(eth))
869 val |= PPSC_MDC_TURBO;
870 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
872 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
874 ret = of_mdiobus_register(eth->mii_bus, mii_np);
881 static void mtk_mdio_cleanup(struct mtk_eth *eth)
886 mdiobus_unregister(eth->mii_bus);
889 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
894 spin_lock_irqsave(&eth->tx_irq_lock, flags);
895 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
896 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
897 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
900 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
905 spin_lock_irqsave(&eth->tx_irq_lock, flags);
906 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
907 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
908 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
911 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
916 spin_lock_irqsave(&eth->rx_irq_lock, flags);
917 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
918 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
919 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
922 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
927 spin_lock_irqsave(&eth->rx_irq_lock, flags);
928 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
929 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
930 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
933 static int mtk_set_mac_address(struct net_device *dev, void *p)
935 int ret = eth_mac_addr(dev, p);
936 struct mtk_mac *mac = netdev_priv(dev);
937 struct mtk_eth *eth = mac->hw;
938 const char *macaddr = dev->dev_addr;
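/* On both paths below the station address is split across two registers:
 * the top two bytes go into the ADRH register and the remaining four
 * bytes into ADRL.
 */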
943 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
946 spin_lock_bh(&mac->hw->page_lock);
947 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
948 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
949 MT7628_SDM_MAC_ADRH);
950 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
951 (macaddr[4] << 8) | macaddr[5],
952 MT7628_SDM_MAC_ADRL);
954 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
955 MTK_GDMA_MAC_ADRH(mac->id));
956 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
957 (macaddr[4] << 8) | macaddr[5],
958 MTK_GDMA_MAC_ADRL(mac->id));
960 spin_unlock_bh(&mac->hw->page_lock);
965 void mtk_stats_update_mac(struct mtk_mac *mac)
967 struct mtk_hw_stats *hw_stats = mac->hw_stats;
968 struct mtk_eth *eth = mac->hw;
970 u64_stats_update_begin(&hw_stats->syncp);
972 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
973 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
974 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
975 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
976 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
977 hw_stats->rx_checksum_errors +=
978 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
980 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
981 unsigned int offs = hw_stats->reg_offset;
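/* Each GDM has its own block of MIB counters at gdm1_cnt + reg_offset.
 * The byte counters are 64 bits wide and are read as a low/high pair of
 * 32-bit registers, hence the extra read shifted up by 32 below.
 */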
984 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
985 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
987 hw_stats->rx_bytes += (stats << 32);
988 hw_stats->rx_packets +=
989 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
990 hw_stats->rx_overflow +=
991 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
992 hw_stats->rx_fcs_errors +=
993 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
994 hw_stats->rx_short_errors +=
995 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
996 hw_stats->rx_long_errors +=
997 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
998 hw_stats->rx_checksum_errors +=
999 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1000 hw_stats->rx_flow_control_packets +=
1001 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1003 if (mtk_is_netsys_v3_or_greater(eth)) {
1004 hw_stats->tx_skip +=
1005 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1006 hw_stats->tx_collisions +=
1007 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1008 hw_stats->tx_bytes +=
1009 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1010 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1012 hw_stats->tx_bytes += (stats << 32);
1013 hw_stats->tx_packets +=
1014 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1016 hw_stats->tx_skip +=
1017 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1018 hw_stats->tx_collisions +=
1019 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1020 hw_stats->tx_bytes +=
1021 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1022 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1024 hw_stats->tx_bytes += (stats << 32);
1025 hw_stats->tx_packets +=
1026 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1030 u64_stats_update_end(&hw_stats->syncp);
1033 static void mtk_stats_update(struct mtk_eth *eth)
1037 for (i = 0; i < MTK_MAX_DEVS; i++) {
1038 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1040 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1041 mtk_stats_update_mac(eth->mac[i]);
1042 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1047 static void mtk_get_stats64(struct net_device *dev,
1048 struct rtnl_link_stats64 *storage)
1050 struct mtk_mac *mac = netdev_priv(dev);
1051 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1054 if (netif_running(dev) && netif_device_present(dev)) {
1055 if (spin_trylock_bh(&hw_stats->stats_lock)) {
1056 mtk_stats_update_mac(mac);
1057 spin_unlock_bh(&hw_stats->stats_lock);
1062 start = u64_stats_fetch_begin(&hw_stats->syncp);
1063 storage->rx_packets = hw_stats->rx_packets;
1064 storage->tx_packets = hw_stats->tx_packets;
1065 storage->rx_bytes = hw_stats->rx_bytes;
1066 storage->tx_bytes = hw_stats->tx_bytes;
1067 storage->collisions = hw_stats->tx_collisions;
1068 storage->rx_length_errors = hw_stats->rx_short_errors +
1069 hw_stats->rx_long_errors;
1070 storage->rx_over_errors = hw_stats->rx_overflow;
1071 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1072 storage->rx_errors = hw_stats->rx_checksum_errors;
1073 storage->tx_aborted_errors = hw_stats->tx_skip;
1074 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
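/* The fetch_begin/fetch_retry pair above re-reads the snapshot if the
 * counters were updated concurrently, so the 64-bit values stay
 * consistent on 32-bit systems without the reader taking the stats lock.
 */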
1076 storage->tx_errors = dev->stats.tx_errors;
1077 storage->rx_dropped = dev->stats.rx_dropped;
1078 storage->tx_dropped = dev->stats.tx_dropped;
1081 static inline int mtk_max_frag_size(int mtu)
1083 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1084 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1085 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1087 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1088 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
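/* mtk_max_frag_size() sizes the RX fragment so that, after the headroom
 * and the trailing struct skb_shared_info, build_skb() can still hold a
 * full MTK_MAX_RX_LENGTH_2K frame; mtk_max_buf_size() works backwards
 * from that fragment size to the usable DMA buffer length.
 */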
1091 static inline int mtk_max_buf_size(int frag_size)
1093 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1094 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1096 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1101 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1102 struct mtk_rx_dma_v2 *dma_rxd)
1104 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1105 if (!(rxd->rxd2 & RX_DMA_DONE))
1108 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1109 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1110 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1111 if (mtk_is_netsys_v2_or_greater(eth)) {
1112 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1113 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1119 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1121 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1124 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1127 return (void *)data;
1130 /* the qdma core needs scratch memory to be set up */
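/* The free queue is a ring of 'cnt' linked TX descriptors, each pointing
 * at a MTK_QDMA_PAGE_SIZE chunk of the scratch buffer; the head and tail
 * of the ring are then handed to the QDMA engine via fq_head/fq_tail.
 */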
1131 static int mtk_init_fq_dma(struct mtk_eth *eth)
1133 const struct mtk_soc_data *soc = eth->soc;
1134 dma_addr_t phy_ring_tail;
1135 int cnt = MTK_QDMA_RING_SIZE;
1136 dma_addr_t dma_addr;
1139 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1140 eth->scratch_ring = eth->sram_base;
1142 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1143 cnt * soc->txrx.txd_size,
1144 &eth->phy_scratch_ring,
1146 if (unlikely(!eth->scratch_ring))
1149 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1150 if (unlikely(!eth->scratch_head))
1153 dma_addr = dma_map_single(eth->dma_dev,
1154 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1156 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1159 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1161 for (i = 0; i < cnt; i++) {
1162 dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1163 struct mtk_tx_dma_v2 *txd;
1165 txd = eth->scratch_ring + i * soc->txrx.txd_size;
1168 txd->txd2 = eth->phy_scratch_ring +
1169 (i + 1) * soc->txrx.txd_size;
1171 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1172 if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1173 txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
1175 if (mtk_is_netsys_v2_or_greater(eth)) {
1183 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1184 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1185 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1186 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1191 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1193 return ring->dma + (desc - ring->phys);
1196 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1197 void *txd, u32 txd_size)
1199 int idx = (txd - ring->dma) / txd_size;
1201 return &ring->buf[idx];
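/* Translate a QDMA descriptor pointer into the descriptor at the same
 * index in the shadow PDMA ring, used on SoCs that only have PDMA.
 */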
1204 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1205 struct mtk_tx_dma *dma)
1207 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1210 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1212 return (dma - ring->dma) / txd_size;
1215 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1216 struct xdp_frame_bulk *bq, bool napi)
1218 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1219 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1220 dma_unmap_single(eth->dma_dev,
1221 dma_unmap_addr(tx_buf, dma_addr0),
1222 dma_unmap_len(tx_buf, dma_len0),
1224 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1225 dma_unmap_page(eth->dma_dev,
1226 dma_unmap_addr(tx_buf, dma_addr0),
1227 dma_unmap_len(tx_buf, dma_len0),
1231 if (dma_unmap_len(tx_buf, dma_len0)) {
1232 dma_unmap_page(eth->dma_dev,
1233 dma_unmap_addr(tx_buf, dma_addr0),
1234 dma_unmap_len(tx_buf, dma_len0),
1238 if (dma_unmap_len(tx_buf, dma_len1)) {
1239 dma_unmap_page(eth->dma_dev,
1240 dma_unmap_addr(tx_buf, dma_addr1),
1241 dma_unmap_len(tx_buf, dma_len1),
1246 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1247 if (tx_buf->type == MTK_TYPE_SKB) {
1248 struct sk_buff *skb = tx_buf->data;
1251 napi_consume_skb(skb, napi);
1253 dev_kfree_skb_any(skb);
1255 struct xdp_frame *xdpf = tx_buf->data;
1257 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1258 xdp_return_frame_rx_napi(xdpf);
1260 xdp_return_frame_bulk(xdpf, bq);
1262 xdp_return_frame(xdpf);
1266 tx_buf->data = NULL;
1269 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1270 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1271 size_t size, int idx)
1273 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1274 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1275 dma_unmap_len_set(tx_buf, dma_len0, size);
1278 txd->txd3 = mapped_addr;
1279 txd->txd2 |= TX_DMA_PLEN1(size);
1280 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1281 dma_unmap_len_set(tx_buf, dma_len1, size);
1283 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1284 txd->txd1 = mapped_addr;
1285 txd->txd2 = TX_DMA_PLEN0(size);
1286 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1287 dma_unmap_len_set(tx_buf, dma_len0, size);
1292 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1293 struct mtk_tx_dma_desc_info *info)
1295 struct mtk_mac *mac = netdev_priv(dev);
1296 struct mtk_eth *eth = mac->hw;
1297 struct mtk_tx_dma *desc = txd;
1300 WRITE_ONCE(desc->txd1, info->addr);
1302 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1303 FIELD_PREP(TX_DMA_PQID, info->qid);
1306 WRITE_ONCE(desc->txd3, data);
1308 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1312 /* tx checksum offload */
1314 data |= TX_DMA_CHKSUM;
1315 /* vlan header offload */
1317 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1319 WRITE_ONCE(desc->txd4, data);
1322 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1323 struct mtk_tx_dma_desc_info *info)
1325 struct mtk_mac *mac = netdev_priv(dev);
1326 struct mtk_tx_dma_v2 *desc = txd;
1327 struct mtk_eth *eth = mac->hw;
1330 WRITE_ONCE(desc->txd1, info->addr);
1332 data = TX_DMA_PLEN0(info->size);
1336 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1337 data |= TX_DMA_PREP_ADDR64(info->addr);
1339 WRITE_ONCE(desc->txd3, data);
1341 /* set forward port */
1344 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1347 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1350 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1354 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1355 WRITE_ONCE(desc->txd4, data);
1360 data |= TX_DMA_TSO_V2;
1361 /* tx checksum offload */
1363 data |= TX_DMA_CHKSUM_V2;
1364 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1365 data |= TX_DMA_SPTAG_V3;
1367 WRITE_ONCE(desc->txd5, data);
1370 if (info->first && info->vlan)
1371 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1372 WRITE_ONCE(desc->txd6, data);
1374 WRITE_ONCE(desc->txd7, 0);
1375 WRITE_ONCE(desc->txd8, 0);
1378 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1379 struct mtk_tx_dma_desc_info *info)
1381 struct mtk_mac *mac = netdev_priv(dev);
1382 struct mtk_eth *eth = mac->hw;
1384 if (mtk_is_netsys_v2_or_greater(eth))
1385 mtk_tx_set_dma_desc_v2(dev, txd, info);
1387 mtk_tx_set_dma_desc_v1(dev, txd, info);
1390 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1391 int tx_num, struct mtk_tx_ring *ring, bool gso)
1393 struct mtk_tx_dma_desc_info txd_info = {
1394 .size = skb_headlen(skb),
1396 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1397 .vlan = skb_vlan_tag_present(skb),
1398 .qid = skb_get_queue_mapping(skb),
1399 .vlan_tci = skb_vlan_tag_get(skb),
1401 .last = !skb_is_nonlinear(skb),
1403 struct netdev_queue *txq;
1404 struct mtk_mac *mac = netdev_priv(dev);
1405 struct mtk_eth *eth = mac->hw;
1406 const struct mtk_soc_data *soc = eth->soc;
1407 struct mtk_tx_dma *itxd, *txd;
1408 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1409 struct mtk_tx_buf *itx_buf, *tx_buf;
1411 int queue = skb_get_queue_mapping(skb);
1414 txq = netdev_get_tx_queue(dev, queue);
1415 itxd = ring->next_free;
1416 itxd_pdma = qdma_to_pdma(ring, itxd);
1417 if (itxd == ring->last_free)
1420 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1421 memset(itx_buf, 0, sizeof(*itx_buf));
1423 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1425 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1428 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1430 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1431 itx_buf->mac_id = mac->id;
1432 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1437 txd_pdma = qdma_to_pdma(ring, txd);
1439 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1440 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1441 unsigned int offset = 0;
1442 int frag_size = skb_frag_size(frag);
1445 bool new_desc = true;
1447 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1449 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1450 txd_pdma = qdma_to_pdma(ring, txd);
1451 if (txd == ring->last_free)
1459 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1460 txd_info.size = min_t(unsigned int, frag_size,
1461 soc->txrx.dma_max_len);
1462 txd_info.qid = queue;
1463 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1464 !(frag_size - txd_info.size);
1465 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1466 offset, txd_info.size,
1468 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1471 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1473 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1474 soc->txrx.txd_size);
1476 memset(tx_buf, 0, sizeof(*tx_buf));
1477 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1478 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1479 tx_buf->mac_id = mac->id;
1481 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1482 txd_info.size, k++);
1484 frag_size -= txd_info.size;
1485 offset += txd_info.size;
1489 /* store skb to cleanup */
1490 itx_buf->type = MTK_TYPE_SKB;
1491 itx_buf->data = skb;
1493 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1495 txd_pdma->txd2 |= TX_DMA_LS0;
1497 txd_pdma->txd2 |= TX_DMA_LS1;
1500 netdev_tx_sent_queue(txq, skb->len);
1501 skb_tx_timestamp(skb);
1503 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1504 atomic_sub(n_desc, &ring->free_count);
1506 /* make sure that all changes to the dma ring are flushed before we
1511 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1512 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1513 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1517 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1519 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1526 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1529 mtk_tx_unmap(eth, tx_buf, NULL, false);
1531 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1532 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1533 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1535 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1536 itxd_pdma = qdma_to_pdma(ring, itxd);
1537 } while (itxd != txd);
1542 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1547 if (skb_is_gso(skb)) {
1548 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1549 frag = &skb_shinfo(skb)->frags[i];
1550 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1551 eth->soc->txrx.dma_max_len);
1554 nfrags += skb_shinfo(skb)->nr_frags;
1560 static int mtk_queue_stopped(struct mtk_eth *eth)
1564 for (i = 0; i < MTK_MAX_DEVS; i++) {
1565 if (!eth->netdev[i])
1567 if (netif_queue_stopped(eth->netdev[i]))
1574 static void mtk_wake_queue(struct mtk_eth *eth)
1578 for (i = 0; i < MTK_MAX_DEVS; i++) {
1579 if (!eth->netdev[i])
1581 netif_tx_wake_all_queues(eth->netdev[i]);
1585 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1587 struct mtk_mac *mac = netdev_priv(dev);
1588 struct mtk_eth *eth = mac->hw;
1589 struct mtk_tx_ring *ring = &eth->tx_ring;
1590 struct net_device_stats *stats = &dev->stats;
1594 /* normally we can rely on the stack not calling this more than once,
1595 * however we have 2 queues running on the same ring so we need to lock
1598 spin_lock(&eth->page_lock);
1600 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1603 tx_num = mtk_cal_txd_req(eth, skb);
1604 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1605 netif_tx_stop_all_queues(dev);
1606 netif_err(eth, tx_queued, dev,
1607 "Tx Ring full when queue awake!\n");
1608 spin_unlock(&eth->page_lock);
1609 return NETDEV_TX_BUSY;
1612 /* TSO: fill MSS info in tcp checksum field */
1613 if (skb_is_gso(skb)) {
1614 if (skb_cow_head(skb, 0)) {
1615 netif_warn(eth, tx_err, dev,
1616 "GSO expand head fail.\n");
1620 if (skb_shinfo(skb)->gso_type &
1621 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1623 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
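/* The TCP checksum field above now carries the MSS down to the hardware
 * (see the comment before this block); the real checksum is presumably
 * regenerated by the engine when it segments the frame.
 */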
1627 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1630 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1631 netif_tx_stop_all_queues(dev);
1633 spin_unlock(&eth->page_lock);
1635 return NETDEV_TX_OK;
1638 spin_unlock(&eth->page_lock);
1639 stats->tx_dropped++;
1640 dev_kfree_skb_any(skb);
1641 return NETDEV_TX_OK;
1644 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1647 struct mtk_rx_ring *ring;
1651 return &eth->rx_ring[0];
1653 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1654 struct mtk_rx_dma *rxd;
1656 ring = &eth->rx_ring[i];
1657 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1658 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1659 if (rxd->rxd2 & RX_DMA_DONE) {
1660 ring->calc_idx_update = true;
1668 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1670 struct mtk_rx_ring *ring;
1674 ring = &eth->rx_ring[0];
1675 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1677 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1678 ring = &eth->rx_ring[i];
1679 if (ring->calc_idx_update) {
1680 ring->calc_idx_update = false;
1681 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1687 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1689 return mtk_is_netsys_v2_or_greater(eth);
1692 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1693 struct xdp_rxq_info *xdp_q,
1696 struct page_pool_params pp_params = {
1698 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1700 .nid = NUMA_NO_NODE,
1701 .dev = eth->dma_dev,
1702 .offset = MTK_PP_HEADROOM,
1703 .max_len = MTK_PP_MAX_BUF_SIZE,
1705 struct page_pool *pp;
1708 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1710 pp = page_pool_create(&pp_params);
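/* The bidirectional mapping chosen above is only needed while an XDP
 * program is attached, so XDP_TX can transmit straight out of page-pool
 * buffers; otherwise an RX-only DMA_FROM_DEVICE mapping is sufficient.
 */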
1714 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1715 eth->rx_napi.napi_id, PAGE_SIZE);
1719 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1721 goto err_unregister_rxq;
1726 xdp_rxq_info_unreg(xdp_q);
1728 page_pool_destroy(pp);
1730 return ERR_PTR(err);
1733 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1738 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1742 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1743 return page_address(page);
1746 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1748 if (ring->page_pool)
1749 page_pool_put_full_page(ring->page_pool,
1750 virt_to_head_page(data), napi);
1752 skb_free_frag(data);
1755 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1756 struct mtk_tx_dma_desc_info *txd_info,
1757 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1758 void *data, u16 headroom, int index, bool dma_map)
1760 struct mtk_tx_ring *ring = &eth->tx_ring;
1761 struct mtk_mac *mac = netdev_priv(dev);
1762 struct mtk_tx_dma *txd_pdma;
1764 if (dma_map) { /* ndo_xdp_xmit */
1765 txd_info->addr = dma_map_single(eth->dma_dev, data,
1766 txd_info->size, DMA_TO_DEVICE);
1767 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1770 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1772 struct page *page = virt_to_head_page(data);
1774 txd_info->addr = page_pool_get_dma_addr(page) +
1775 sizeof(struct xdp_frame) + headroom;
1776 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1777 txd_info->size, DMA_BIDIRECTIONAL);
1779 mtk_tx_set_dma_desc(dev, txd, txd_info);
1781 tx_buf->mac_id = mac->id;
1782 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1783 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1785 txd_pdma = qdma_to_pdma(ring, txd);
1786 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1792 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1793 struct net_device *dev, bool dma_map)
1795 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1796 const struct mtk_soc_data *soc = eth->soc;
1797 struct mtk_tx_ring *ring = &eth->tx_ring;
1798 struct mtk_mac *mac = netdev_priv(dev);
1799 struct mtk_tx_dma_desc_info txd_info = {
1802 .last = !xdp_frame_has_frags(xdpf),
1805 int err, index = 0, n_desc = 1, nr_frags;
1806 struct mtk_tx_buf *htx_buf, *tx_buf;
1807 struct mtk_tx_dma *htxd, *txd;
1808 void *data = xdpf->data;
1810 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1813 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1814 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1817 spin_lock(&eth->page_lock);
1819 txd = ring->next_free;
1820 if (txd == ring->last_free) {
1821 spin_unlock(&eth->page_lock);
1826 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1827 memset(tx_buf, 0, sizeof(*tx_buf));
1831 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1832 data, xdpf->headroom, index, dma_map);
1839 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1840 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1841 if (txd == ring->last_free)
1844 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1845 soc->txrx.txd_size);
1846 memset(tx_buf, 0, sizeof(*tx_buf));
1850 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1851 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1852 txd_info.last = index + 1 == nr_frags;
1853 txd_info.qid = mac->id;
1854 data = skb_frag_address(&sinfo->frags[index]);
1858 /* store xdpf for cleanup */
1859 htx_buf->data = xdpf;
1861 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1862 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1865 txd_pdma->txd2 |= TX_DMA_LS0;
1867 txd_pdma->txd2 |= TX_DMA_LS1;
1870 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1871 atomic_sub(n_desc, &ring->free_count);
1873 /* make sure that all changes to the dma ring are flushed before we
1878 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1879 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1883 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1884 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1885 MT7628_TX_CTX_IDX0);
1888 spin_unlock(&eth->page_lock);
1893 while (htxd != txd) {
1894 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1895 mtk_tx_unmap(eth, tx_buf, NULL, false);
1897 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1898 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1899 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1901 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1904 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1907 spin_unlock(&eth->page_lock);
1912 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1913 struct xdp_frame **frames, u32 flags)
1915 struct mtk_mac *mac = netdev_priv(dev);
1916 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1917 struct mtk_eth *eth = mac->hw;
1920 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1923 for (i = 0; i < num_frame; i++) {
1924 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1929 u64_stats_update_begin(&hw_stats->syncp);
1930 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1931 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1932 u64_stats_update_end(&hw_stats->syncp);
1937 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1938 struct xdp_buff *xdp, struct net_device *dev)
1940 struct mtk_mac *mac = netdev_priv(dev);
1941 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1942 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1943 struct bpf_prog *prog;
1948 prog = rcu_dereference(eth->prog);
1952 act = bpf_prog_run_xdp(prog, xdp);
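/* 'count' starts out pointing at the drop counter; each verdict below
 * redirects it to the matching per-action XDP statistic before the
 * common stats update at the end of the function.
 */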
1955 count = &hw_stats->xdp_stats.rx_xdp_pass;
1958 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1963 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1966 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1968 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1969 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1974 count = &hw_stats->xdp_stats.rx_xdp_tx;
1978 bpf_warn_invalid_xdp_action(dev, prog, act);
1981 trace_xdp_exception(dev, prog, act);
1987 page_pool_put_full_page(ring->page_pool,
1988 virt_to_head_page(xdp->data), true);
1991 u64_stats_update_begin(&hw_stats->syncp);
1992 *count = *count + 1;
1993 u64_stats_update_end(&hw_stats->syncp);
2000 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2001 struct mtk_eth *eth)
2003 struct dim_sample dim_sample = {};
2004 struct mtk_rx_ring *ring;
2005 bool xdp_flush = false;
2007 struct sk_buff *skb;
2009 u8 *data, *new_data;
2010 struct mtk_rx_dma_v2 *rxd, trxd;
2011 int done = 0, bytes = 0;
2012 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2014 while (done < budget) {
2015 unsigned int pktlen, *rxdcsum;
2016 struct net_device *netdev;
2020 ring = mtk_get_rx_ring(eth);
2021 if (unlikely(!ring))
2024 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2025 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
2026 data = ring->data[idx];
2028 if (!mtk_rx_get_desc(eth, &trxd, rxd))
2031 /* find out which mac the packet came from. values start at 1 */
2032 if (mtk_is_netsys_v2_or_greater(eth)) {
2033 u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2046 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2047 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2048 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2051 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2055 netdev = eth->netdev[mac];
2057 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2060 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2062 /* alloc new buffer */
2063 if (ring->page_pool) {
2064 struct page *page = virt_to_head_page(data);
2065 struct xdp_buff xdp;
2068 new_data = mtk_page_pool_get_buff(ring->page_pool,
2071 if (unlikely(!new_data)) {
2072 netdev->stats.rx_dropped++;
2076 dma_sync_single_for_cpu(eth->dma_dev,
2077 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2078 pktlen, page_pool_get_dma_dir(ring->page_pool));
2080 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2081 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2083 xdp_buff_clear_frags_flag(&xdp);
2085 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2086 if (ret == XDP_REDIRECT)
2089 if (ret != XDP_PASS)
2092 skb = build_skb(data, PAGE_SIZE);
2093 if (unlikely(!skb)) {
2094 page_pool_put_full_page(ring->page_pool,
2096 netdev->stats.rx_dropped++;
2100 skb_reserve(skb, xdp.data - xdp.data_hard_start);
2101 skb_put(skb, xdp.data_end - xdp.data);
2102 skb_mark_for_recycle(skb);
2104 if (ring->frag_size <= PAGE_SIZE)
2105 new_data = napi_alloc_frag(ring->frag_size);
2107 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2109 if (unlikely(!new_data)) {
2110 netdev->stats.rx_dropped++;
2114 dma_addr = dma_map_single(eth->dma_dev,
2115 new_data + NET_SKB_PAD + eth->ip_align,
2116 ring->buf_size, DMA_FROM_DEVICE);
2117 if (unlikely(dma_mapping_error(eth->dma_dev,
2119 skb_free_frag(new_data);
2120 netdev->stats.rx_dropped++;
2124 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2125 addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2127 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2128 ring->buf_size, DMA_FROM_DEVICE);
2130 skb = build_skb(data, ring->frag_size);
2131 if (unlikely(!skb)) {
2132 netdev->stats.rx_dropped++;
2133 skb_free_frag(data);
2137 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2138 skb_put(skb, pktlen);
2144 if (mtk_is_netsys_v2_or_greater(eth)) {
2145 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2146 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2147 if (hash != MTK_RXD5_FOE_ENTRY)
2148 skb_set_hash(skb, jhash_1word(hash, 0),
2150 rxdcsum = &trxd.rxd3;
2152 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2153 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2154 if (hash != MTK_RXD4_FOE_ENTRY)
2155 skb_set_hash(skb, jhash_1word(hash, 0),
2157 rxdcsum = &trxd.rxd4;
2160 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2161 skb->ip_summed = CHECKSUM_UNNECESSARY;
2163 skb_checksum_none_assert(skb);
2164 skb->protocol = eth_type_trans(skb, netdev);
2166 /* When using VLAN untagging in combination with DSA, the
2167 * hardware treats the MTK special tag as a VLAN and untags it.
2169 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2170 netdev_uses_dsa(netdev)) {
2171 unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2173 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2174 eth->dsa_meta[port])
2175 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2178 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2179 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2181 skb_record_rx_queue(skb, 0);
2182 napi_gro_receive(napi, skb);
2185 ring->data[idx] = new_data;
2186 rxd->rxd1 = (unsigned int)dma_addr;
2188 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2189 rxd->rxd2 = RX_DMA_LSO;
2191 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2193 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2194 likely(dma_addr != DMA_MAPPING_ERROR))
2195 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2197 ring->calc_idx = idx;
2203 /* make sure that all changes to the dma ring are flushed before
2207 mtk_update_rx_cpu_idx(eth);
2210 eth->rx_packets += done;
2211 eth->rx_bytes += bytes;
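/* Feed the observed packet/byte totals to DIM (dynamic interrupt
 * moderation) so the RX interrupt coalescing parameters can adapt to
 * the current load.
 */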
2212 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2214 net_dim(&eth->rx_dim, dim_sample);
2222 struct mtk_poll_state {
2223 struct netdev_queue *txq;
2230 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2231 struct sk_buff *skb)
2233 struct netdev_queue *txq;
2234 struct net_device *dev;
2235 unsigned int bytes = skb->len;
2239 eth->tx_bytes += bytes;
2241 dev = eth->netdev[mac];
2245 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2246 if (state->txq == txq) {
2248 state->bytes += bytes;
2253 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2257 state->bytes = bytes;
2260 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2261 struct mtk_poll_state *state)
2263 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2264 struct mtk_tx_ring *ring = &eth->tx_ring;
2265 struct mtk_tx_buf *tx_buf;
2266 struct xdp_frame_bulk bq;
2267 struct mtk_tx_dma *desc;
2270 cpu = ring->last_free_ptr;
2271 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2273 desc = mtk_qdma_phys_to_virt(ring, cpu);
2274 xdp_frame_bulk_init(&bq);
2276 while ((cpu != dma) && budget) {
2277 u32 next_cpu = desc->txd2;
2279 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2280 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2283 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2284 eth->soc->txrx.txd_size);
2288 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2289 if (tx_buf->type == MTK_TYPE_SKB)
2290 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2295 mtk_tx_unmap(eth, tx_buf, &bq, true);
2297 ring->last_free = desc;
2298 atomic_inc(&ring->free_count);
2302 xdp_flush_frame_bulk(&bq);
2304 ring->last_free_ptr = cpu;
2305 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2310 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2311 struct mtk_poll_state *state)
2313 struct mtk_tx_ring *ring = &eth->tx_ring;
2314 struct mtk_tx_buf *tx_buf;
2315 struct xdp_frame_bulk bq;
2316 struct mtk_tx_dma *desc;
2319 cpu = ring->cpu_idx;
2320 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2321 xdp_frame_bulk_init(&bq);
2323 while ((cpu != dma) && budget) {
2324 tx_buf = &ring->buf[cpu];
2328 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2329 if (tx_buf->type == MTK_TYPE_SKB)
2330 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2333 mtk_tx_unmap(eth, tx_buf, &bq, true);
2335 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2336 ring->last_free = desc;
2337 atomic_inc(&ring->free_count);
2339 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2341 xdp_flush_frame_bulk(&bq);
2343 ring->cpu_idx = cpu;
2348 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2350 struct mtk_tx_ring *ring = &eth->tx_ring;
2351 struct dim_sample dim_sample = {};
2352 struct mtk_poll_state state = {};
2354 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2355 budget = mtk_poll_tx_qdma(eth, budget, &state);
2357 budget = mtk_poll_tx_pdma(eth, budget, &state);
2360 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2362 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2364 net_dim(&eth->tx_dim, dim_sample);
2366 if (mtk_queue_stopped(eth) &&
2367 (atomic_read(&ring->free_count) > ring->thresh))
2368 mtk_wake_queue(eth);
2373 static void mtk_handle_status_irq(struct mtk_eth *eth)
2375 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2377 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2378 mtk_stats_update(eth);
2379 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2384 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2386 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2387 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2390 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2391 mtk_handle_status_irq(eth);
2392 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2393 tx_done = mtk_poll_tx(eth, budget);
2395 if (unlikely(netif_msg_intr(eth))) {
2397 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2398 mtk_r32(eth, reg_map->tx_irq_status),
2399 mtk_r32(eth, reg_map->tx_irq_mask));
2402 if (tx_done == budget)
2405 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2408 if (napi_complete_done(napi, tx_done))
2409 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2414 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2416 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2417 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2418 int rx_done_total = 0;
2420 mtk_handle_status_irq(eth);
2425 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2426 reg_map->pdma.irq_status);
2427 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2428 rx_done_total += rx_done;
2430 if (unlikely(netif_msg_intr(eth))) {
2432 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2433 mtk_r32(eth, reg_map->pdma.irq_status),
2434 mtk_r32(eth, reg_map->pdma.irq_mask));
2437 if (rx_done_total == budget)
2440 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2441 eth->soc->txrx.rx_irq_done_mask);
2443 if (napi_complete_done(napi, rx_done_total))
2444 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2446 return rx_done_total;
2449 static int mtk_tx_alloc(struct mtk_eth *eth)
2451 const struct mtk_soc_data *soc = eth->soc;
2452 struct mtk_tx_ring *ring = &eth->tx_ring;
2453 int i, sz = soc->txrx.txd_size;
2454 struct mtk_tx_dma_v2 *txd;
2458 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2459 ring_size = MTK_QDMA_RING_SIZE;
2461 ring_size = MTK_DMA_SIZE;
2463 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2468 if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2469 ring->dma = eth->sram_base + ring_size * sz;
2470 ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
2472 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2473 &ring->phys, GFP_KERNEL);
2479 for (i = 0; i < ring_size; i++) {
2480 int next = (i + 1) % ring_size;
2481 u32 next_ptr = ring->phys + next * sz;
2483 txd = ring->dma + i * sz;
2484 txd->txd2 = next_ptr;
2485 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2487 if (mtk_is_netsys_v2_or_greater(eth)) {
2495 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2496 * only as the framework. The real HW descriptors are the PDMA
2497 * descriptors in ring->dma_pdma.
2499 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2500 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2501 &ring->phys_pdma, GFP_KERNEL);
2502 if (!ring->dma_pdma)
2505 for (i = 0; i < ring_size; i++) {
2506 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2507 ring->dma_pdma[i].txd4 = 0;
2511 ring->dma_size = ring_size;
2512 atomic_set(&ring->free_count, ring_size - 2);
2513 ring->next_free = ring->dma;
2514 ring->last_free = (void *)txd;
2515 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2516 ring->thresh = MAX_SKB_FRAGS;
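/* free_count is seeded at ring_size - 2, apparently keeping a small
 * reserve, and thresh = MAX_SKB_FRAGS so mtk_start_xmit() stops the
 * queue while a maximally fragmented skb might no longer fit.
 */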
2518 /* make sure that all changes to the dma ring are flushed before we
2523 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2524 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2525 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2527 ring->phys + ((ring_size - 1) * sz),
2528 soc->reg_map->qdma.crx_ptr);
2529 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2531 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2532 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2533 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2535 val = MTK_QTX_SCH_MIN_RATE_EN |
2536 /* minimum: 10 Mbps */
2537 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2538 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2539 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2540 if (mtk_is_netsys_v1(eth))
2541 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2542 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2543 ofs += MTK_QTX_OFFSET;
2545 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2546 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2547 if (mtk_is_netsys_v2_or_greater(eth))
2548 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2550 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2551 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2552 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2553 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2562 static void mtk_tx_clean(struct mtk_eth *eth)
2564 const struct mtk_soc_data *soc = eth->soc;
2565 struct mtk_tx_ring *ring = &eth->tx_ring;
2569 for (i = 0; i < ring->dma_size; i++)
2570 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2574 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2575 dma_free_coherent(eth->dma_dev,
2576 ring->dma_size * soc->txrx.txd_size,
2577 ring->dma, ring->phys);
2581 if (ring->dma_pdma) {
2582 dma_free_coherent(eth->dma_dev,
2583 ring->dma_size * soc->txrx.txd_size,
2584 ring->dma_pdma, ring->phys_pdma);
2585 ring->dma_pdma = NULL;
2589 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2591 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2592 struct mtk_rx_ring *ring;
2593 int rx_data_len, rx_dma_size, tx_ring_size;
2596 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2597 tx_ring_size = MTK_QDMA_RING_SIZE;
2599 tx_ring_size = MTK_DMA_SIZE;
2601 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2604 ring = &eth->rx_ring_qdma;
2606 ring = &eth->rx_ring[ring_no];
2609 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2610 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2611 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2613 rx_data_len = ETH_DATA_LEN;
2614 rx_dma_size = MTK_DMA_SIZE;
2617 ring->frag_size = mtk_max_frag_size(rx_data_len);
2618 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2619 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2624 if (mtk_page_pool_enabled(eth)) {
2625 struct page_pool *pp;
2627 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2632 ring->page_pool = pp;
2635 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2636 rx_flag != MTK_RX_FLAGS_NORMAL) {
2637 ring->dma = dma_alloc_coherent(eth->dma_dev,
2638 rx_dma_size * eth->soc->txrx.rxd_size,
2639 &ring->phys, GFP_KERNEL);
2641 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2643 ring->dma = tx_ring->dma + tx_ring_size *
2644 eth->soc->txrx.txd_size * (ring_no + 1);
2645 ring->phys = tx_ring->phys + tx_ring_size *
2646 eth->soc->txrx.txd_size * (ring_no + 1);
2652 for (i = 0; i < rx_dma_size; i++) {
2653 struct mtk_rx_dma_v2 *rxd;
2654 dma_addr_t dma_addr;
2657 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2658 if (ring->page_pool) {
2659 data = mtk_page_pool_get_buff(ring->page_pool,
2660 &dma_addr, GFP_KERNEL);
2664 if (ring->frag_size <= PAGE_SIZE)
2665 data = netdev_alloc_frag(ring->frag_size);
2667 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2672 dma_addr = dma_map_single(eth->dma_dev,
2673 data + NET_SKB_PAD + eth->ip_align,
2674 ring->buf_size, DMA_FROM_DEVICE);
2675 if (unlikely(dma_mapping_error(eth->dma_dev,
2677 skb_free_frag(data);
2681 rxd->rxd1 = (unsigned int)dma_addr;
2682 ring->data[i] = data;
2684 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2685 rxd->rxd2 = RX_DMA_LSO;
2687 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2689 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2690 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2694 if (mtk_is_netsys_v2_or_greater(eth)) {
2702 ring->dma_size = rx_dma_size;
2703 ring->calc_idx_update = false;
2704 ring->calc_idx = rx_dma_size - 1;
2705 if (rx_flag == MTK_RX_FLAGS_QDMA)
2706 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2707 ring_no * MTK_QRX_OFFSET;
2709 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2710 ring_no * MTK_QRX_OFFSET;
2711 /* make sure that all changes to the dma ring are flushed before we continue */
2716 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2717 mtk_w32(eth, ring->phys,
2718 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2719 mtk_w32(eth, rx_dma_size,
2720 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2721 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2722 reg_map->qdma.rst_idx);
2724 mtk_w32(eth, ring->phys,
2725 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2726 mtk_w32(eth, rx_dma_size,
2727 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2728 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2729 reg_map->pdma.rst_idx);
2731 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2736 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2741 if (ring->data && ring->dma) {
2742 for (i = 0; i < ring->dma_size; i++) {
2743 struct mtk_rx_dma *rxd;
2748 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2752 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2753 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2755 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2756 ring->buf_size, DMA_FROM_DEVICE);
2757 mtk_rx_put_buff(ring, ring->data[i], false);
2763 if (!in_sram && ring->dma) {
2764 dma_free_coherent(eth->dma_dev,
2765 ring->dma_size * eth->soc->txrx.rxd_size,
2766 ring->dma, ring->phys);
2770 if (ring->page_pool) {
2771 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2772 xdp_rxq_info_unreg(&ring->xdp_q);
2773 page_pool_destroy(ring->page_pool);
2774 ring->page_pool = NULL;
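/* HW LRO setup: the non-default RX rings are switched to auto-learn mode with
 * the age/aggregation timers expressed in 20us units, while the global control
 * words enable IPv4 checksum updates and packet-count based flow replacement.
 */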
2778 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2781 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2782 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2784 /* set LRO rings to auto-learn modes */
2785 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2787 /* validate LRO ring */
2788 ring_ctrl_dw2 |= MTK_RING_VLD;
2790 /* set AGE timer (unit: 20us) */
2791 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2792 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2794 /* set max AGG timer (unit: 20us) */
2795 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2797 /* set max LRO AGG count */
2798 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2799 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2801 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2802 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2803 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2804 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2807 /* IPv4 checksum update enable */
2808 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2810 /* switch priority comparison to packet count mode */
2811 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2813 /* bandwidth threshold setting */
2814 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2816 /* auto-learn score delta setting */
2817 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2819 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2820 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2821 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2823 /* set HW LRO mode & the max aggregation count for rx packets */
2824 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2826 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2827 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2830 lro_ctrl_dw0 |= MTK_LRO_EN;
2832 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2833 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2838 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2843 /* relinquish lro rings, flush aggregated packets */
2844 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2846 /* wait for relinquishments done */
2847 for (i = 0; i < 10; i++) {
2848 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2849 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2856 /* invalidate lro rings */
2857 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2858 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2860 /* disable HW LRO */
2861 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
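/* Each MAC owns MTK_MAX_LRO_IP_CNT destination-IP slots in the LRO engine; the
 * global slot index is mac->id * MTK_MAX_LRO_IP_CNT plus the local slot, and a
 * slot is (in)validated by toggling MTK_RING_MYIP_VLD around the DIP update.
 */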
2864 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2868 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2870 /* invalidate the IP setting */
2871 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2873 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2875 /* validate the IP setting */
2876 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2879 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2883 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2885 /* invalidate the IP setting */
2886 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2888 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2891 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2896 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2897 if (mac->hwlro_ip[i])
2904 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2905 struct ethtool_rxnfc *cmd)
2907 struct ethtool_rx_flow_spec *fsp =
2908 (struct ethtool_rx_flow_spec *)&cmd->fs;
2909 struct mtk_mac *mac = netdev_priv(dev);
2910 struct mtk_eth *eth = mac->hw;
2913 if ((fsp->flow_type != TCP_V4_FLOW) ||
2914 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2915 (fsp->location > 1))
2918 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2919 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2921 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2923 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2928 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2929 struct ethtool_rxnfc *cmd)
2931 struct ethtool_rx_flow_spec *fsp =
2932 (struct ethtool_rx_flow_spec *)&cmd->fs;
2933 struct mtk_mac *mac = netdev_priv(dev);
2934 struct mtk_eth *eth = mac->hw;
2937 if (fsp->location > 1)
2940 mac->hwlro_ip[fsp->location] = 0;
2941 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2943 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2945 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2950 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2952 struct mtk_mac *mac = netdev_priv(dev);
2953 struct mtk_eth *eth = mac->hw;
2956 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2957 mac->hwlro_ip[i] = 0;
2958 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2960 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2963 mac->hwlro_ip_cnt = 0;
2966 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2967 struct ethtool_rxnfc *cmd)
2969 struct mtk_mac *mac = netdev_priv(dev);
2970 struct ethtool_rx_flow_spec *fsp =
2971 (struct ethtool_rx_flow_spec *)&cmd->fs;
2973 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2976 /* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2977 fsp->flow_type = TCP_V4_FLOW;
2978 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2979 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2981 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2982 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2983 fsp->h_u.tcp_ip4_spec.psrc = 0;
2984 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2985 fsp->h_u.tcp_ip4_spec.pdst = 0;
2986 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2987 fsp->h_u.tcp_ip4_spec.tos = 0;
2988 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2993 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2994 struct ethtool_rxnfc *cmd,
2997 struct mtk_mac *mac = netdev_priv(dev);
3001 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3002 if (cnt == cmd->rule_cnt)
3005 if (mac->hwlro_ip[i]) {
3011 cmd->rule_cnt = cnt;
3016 static netdev_features_t mtk_fix_features(struct net_device *dev,
3017 netdev_features_t features)
3019 if (!(features & NETIF_F_LRO)) {
3020 struct mtk_mac *mac = netdev_priv(dev);
3021 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3024 netdev_info(dev, "RX flows are programmed, keeping LRO enabled\n");
3026 features |= NETIF_F_LRO;
3033 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3035 netdev_features_t diff = dev->features ^ features;
3037 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3038 mtk_hwlro_netdev_disable(dev);
3043 /* wait for DMA to finish whatever it is doing before we start using it again */
3044 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3050 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3051 reg = eth->soc->reg_map->qdma.glo_cfg;
3053 reg = eth->soc->reg_map->pdma.glo_cfg;
3055 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3056 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3057 5, MTK_DMA_BUSY_TIMEOUT_US);
3059 dev_err(eth->dev, "DMA init timeout\n");
3064 static int mtk_dma_init(struct mtk_eth *eth)
3069 if (mtk_dma_busy_wait(eth))
3072 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3073 /* QDMA needs scratch memory for internal reordering of the fragments */
3076 err = mtk_init_fq_dma(eth);
3081 err = mtk_tx_alloc(eth);
3085 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3086 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3091 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3096 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3097 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3101 err = mtk_hwlro_rx_init(eth);
3106 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3107 /* Enable random early drop and set drop threshold automatically */
3110 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3111 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3112 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3118 static void mtk_dma_free(struct mtk_eth *eth)
3120 const struct mtk_soc_data *soc = eth->soc;
3123 for (i = 0; i < MTK_MAX_DEVS; i++)
3125 netdev_reset_queue(eth->netdev[i]);
3126 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3127 dma_free_coherent(eth->dma_dev,
3128 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3129 eth->scratch_ring, eth->phy_scratch_ring);
3130 eth->scratch_ring = NULL;
3131 eth->phy_scratch_ring = 0;
3134 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3135 mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3138 mtk_hwlro_rx_uninit(eth);
3139 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3140 mtk_rx_clean(eth, &eth->rx_ring[i], false);
3143 kfree(eth->scratch_head);
3146 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3148 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3150 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3151 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3152 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3155 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3157 struct mtk_mac *mac = netdev_priv(dev);
3158 struct mtk_eth *eth = mac->hw;
3160 if (test_bit(MTK_RESETTING, &eth->state))
3163 if (!mtk_hw_reset_check(eth))
3166 eth->netdev[mac->id]->stats.tx_errors++;
3167 netif_err(eth, tx_err, dev, "transmit timed out\n");
3169 schedule_work(&eth->pending_work);
3172 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3174 struct mtk_eth *eth = _eth;
3177 if (likely(napi_schedule_prep(&eth->rx_napi))) {
3178 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3179 __napi_schedule(&eth->rx_napi);
3185 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3187 struct mtk_eth *eth = _eth;
3190 if (likely(napi_schedule_prep(&eth->tx_napi))) {
3191 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3192 __napi_schedule(&eth->tx_napi);
3198 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3200 struct mtk_eth *eth = _eth;
3201 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3203 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3204 eth->soc->txrx.rx_irq_done_mask) {
3205 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3206 eth->soc->txrx.rx_irq_done_mask)
3207 mtk_handle_irq_rx(irq, _eth);
3209 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3210 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3211 mtk_handle_irq_tx(irq, _eth);
3217 #ifdef CONFIG_NET_POLL_CONTROLLER
3218 static void mtk_poll_controller(struct net_device *dev)
3220 struct mtk_mac *mac = netdev_priv(dev);
3221 struct mtk_eth *eth = mac->hw;
3223 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3224 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3225 mtk_handle_irq_rx(eth->irq[2], dev);
3226 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3227 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3231 static int mtk_start_dma(struct mtk_eth *eth)
3233 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3234 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3237 err = mtk_dma_init(eth);
3243 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3244 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3245 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3246 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3247 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3249 if (mtk_is_netsys_v2_or_greater(eth))
3250 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3251 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3252 MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3254 val |= MTK_RX_BT_32DWORDS;
3255 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3258 MTK_RX_DMA_EN | rx_2b_offset |
3259 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3260 reg_map->pdma.glo_cfg);
3262 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3263 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3264 reg_map->pdma.glo_cfg);
3270 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3274 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3277 for (i = 0; i < MTK_MAX_DEVS; i++) {
3280 if (!eth->netdev[i])
3283 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3285 /* by default, set up the forward port to send frames to the PDMA */
3288 /* Enable RX checksum */
3289 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3293 if (netdev_uses_dsa(eth->netdev[i]))
3294 val |= MTK_GDMA_SPECIAL_TAG;
3296 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3298 /* Reset and enable PSE */
3299 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3300 mtk_w32(eth, 0, MTK_RST_GL);
3304 static bool mtk_uses_dsa(struct net_device *dev)
3306 #if IS_ENABLED(CONFIG_NET_DSA)
3307 return netdev_uses_dsa(dev) &&
3308 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
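/* NETDEV_CHANGE handler: when a DSA user port stacked on this MAC renegotiates
 * its link, map the switch port to its dedicated hardware TX queue and cap that
 * queue's rate at the negotiated link speed.
 */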
3314 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3316 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3317 struct mtk_eth *eth = mac->hw;
3318 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3319 struct ethtool_link_ksettings s;
3320 struct net_device *ldev;
3321 struct list_head *iter;
3322 struct dsa_port *dp;
3324 if (event != NETDEV_CHANGE)
3327 netdev_for_each_lower_dev(dev, ldev, iter) {
3328 if (netdev_priv(ldev) == mac)
3335 if (!dsa_user_dev_check(dev))
3338 if (__ethtool_get_link_ksettings(dev, &s))
3341 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3344 dp = dsa_port_from_netdev(dev);
3345 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3348 if (mac->speed > 0 && mac->speed <= s.base.speed)
3351 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3356 static int mtk_open(struct net_device *dev)
3358 struct mtk_mac *mac = netdev_priv(dev);
3359 struct mtk_eth *eth = mac->hw;
3362 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3364 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3369 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3370 if (!refcount_read(&eth->dma_refcnt)) {
3371 const struct mtk_soc_data *soc = eth->soc;
3375 err = mtk_start_dma(eth);
3377 phylink_disconnect_phy(mac->phylink);
3381 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3382 mtk_ppe_start(eth->ppe[i]);
3384 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3386 mtk_gdm_config(eth, gdm_config);
3388 napi_enable(&eth->tx_napi);
3389 napi_enable(&eth->rx_napi);
3390 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3391 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3392 refcount_set(&eth->dma_refcnt, 1);
3395 refcount_inc(&eth->dma_refcnt);
3397 phylink_start(mac->phylink);
3398 netif_tx_start_all_queues(dev);
3400 if (mtk_is_netsys_v2_or_greater(eth))
3403 if (mtk_uses_dsa(dev) && !eth->prog) {
3404 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3405 struct metadata_dst *md_dst = eth->dsa_meta[i];
3410 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3415 md_dst->u.port_info.port_id = i;
3416 eth->dsa_meta[i] = md_dst;
3419 /* Hardware DSA untagging and VLAN RX offloading need to be
3420 * disabled if at least one MAC does not use DSA.
3422 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3424 val &= ~MTK_CDMP_STAG_EN;
3425 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3427 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
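/* Clear the TX/RX enable bits behind @glo_cfg under the page lock, then poll
 * the busy flags a bounded number of times so a wedged engine cannot stall the
 * shutdown path indefinitely.
 */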
3433 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3438 /* stop the dma engine */
3439 spin_lock_bh(&eth->page_lock);
3440 val = mtk_r32(eth, glo_cfg);
3441 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3443 spin_unlock_bh(&eth->page_lock);
3445 /* wait for dma stop */
3446 for (i = 0; i < 10; i++) {
3447 val = mtk_r32(eth, glo_cfg);
3448 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3456 static int mtk_stop(struct net_device *dev)
3458 struct mtk_mac *mac = netdev_priv(dev);
3459 struct mtk_eth *eth = mac->hw;
3462 phylink_stop(mac->phylink);
3464 netif_tx_disable(dev);
3466 phylink_disconnect_phy(mac->phylink);
3468 /* only shutdown DMA if this is the last user */
3469 if (!refcount_dec_and_test(&eth->dma_refcnt))
3472 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3474 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3475 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3476 napi_disable(&eth->tx_napi);
3477 napi_disable(&eth->rx_napi);
3479 cancel_work_sync(&eth->rx_dim.work);
3480 cancel_work_sync(&eth->tx_dim.work);
3482 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3483 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3484 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3488 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3489 mtk_ppe_stop(eth->ppe[i]);
3494 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3495 struct netlink_ext_ack *extack)
3497 struct mtk_mac *mac = netdev_priv(dev);
3498 struct mtk_eth *eth = mac->hw;
3499 struct bpf_prog *old_prog;
3503 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3507 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3508 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3512 need_update = !!eth->prog != !!prog;
3513 if (netif_running(dev) && need_update)
3516 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3518 bpf_prog_put(old_prog);
3520 if (netif_running(dev) && need_update)
3521 return mtk_open(dev);
3526 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3528 switch (xdp->command) {
3529 case XDP_SETUP_PROG:
3530 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3536 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3538 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3542 usleep_range(1000, 1100);
3543 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3549 static void mtk_clk_disable(struct mtk_eth *eth)
3553 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3554 clk_disable_unprepare(eth->clks[clk]);
3557 static int mtk_clk_enable(struct mtk_eth *eth)
3561 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3562 ret = clk_prepare_enable(eth->clks[clk]);
3564 goto err_disable_clks;
3571 clk_disable_unprepare(eth->clks[clk]);
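/* Net DIM workers: translate the moderation profile suggested by the DIM
 * algorithm into the PDMA (and, when QDMA is present, the mirrored QDMA)
 * delay-interrupt register, packing the timeout in 20us ticks and the packet
 * threshold for one direction while preserving the other direction's half.
 */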
3576 static void mtk_dim_rx(struct work_struct *work)
3578 struct dim *dim = container_of(work, struct dim, work);
3579 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3580 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3581 struct dim_cq_moder cur_profile;
3584 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3586 spin_lock_bh(&eth->dim_lock);
3588 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3589 val &= MTK_PDMA_DELAY_TX_MASK;
3590 val |= MTK_PDMA_DELAY_RX_EN;
3592 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3593 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3595 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3596 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3598 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3599 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3600 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3602 spin_unlock_bh(&eth->dim_lock);
3604 dim->state = DIM_START_MEASURE;
3607 static void mtk_dim_tx(struct work_struct *work)
3609 struct dim *dim = container_of(work, struct dim, work);
3610 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3611 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3612 struct dim_cq_moder cur_profile;
3615 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3617 spin_lock_bh(&eth->dim_lock);
3619 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3620 val &= MTK_PDMA_DELAY_RX_MASK;
3621 val |= MTK_PDMA_DELAY_TX_EN;
3623 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3624 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3626 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3627 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3629 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3630 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3631 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3633 spin_unlock_bh(&eth->dim_lock);
3635 dim->state = DIM_START_MEASURE;
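/* Pick the smallest per-MAC maximum RX frame length bucket (1518, 1536, 1552
 * or 2048 bytes) that covers the requested length; MT7628 has no such MCR
 * field, so it is skipped.
 */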
3638 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3640 struct mtk_eth *eth = mac->hw;
3641 u32 mcr_cur, mcr_new;
3643 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3646 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3647 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3650 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3651 else if (val <= 1536)
3652 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3653 else if (val <= 1552)
3654 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3656 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3658 if (mcr_new != mcr_cur)
3659 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3662 static void mtk_hw_reset(struct mtk_eth *eth)
3666 if (mtk_is_netsys_v2_or_greater(eth))
3667 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3669 if (mtk_is_netsys_v3_or_greater(eth)) {
3670 val = RSTCTRL_PPE0_V3;
3672 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3673 val |= RSTCTRL_PPE1_V3;
3675 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3676 val |= RSTCTRL_PPE2;
3678 val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3679 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3680 val = RSTCTRL_PPE0_V2;
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3683 val |= RSTCTRL_PPE1;
3688 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3690 if (mtk_is_netsys_v3_or_greater(eth))
3691 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3693 else if (mtk_is_netsys_v2_or_greater(eth))
3694 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3698 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3702 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
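/* Warm reset: latch RSTCTRL_FE first, then assert the version-specific
 * ETH/PPE (and, on v3, WDMA) reset bits and release them again, checking each
 * stage through an ETHSYS_RSTCTRL read-back.
 */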
3706 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3710 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3712 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3713 val & RSTCTRL_FE, 1, 1000)) {
3714 dev_err(eth->dev, "warm reset failed\n");
3719 if (mtk_is_netsys_v3_or_greater(eth)) {
3720 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3722 rst_mask |= RSTCTRL_PPE1_V3;
3723 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3724 rst_mask |= RSTCTRL_PPE2;
3726 rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3727 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3728 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3729 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3730 rst_mask |= RSTCTRL_PPE1;
3732 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3735 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3738 val = mtk_hw_reset_read(eth);
3739 if (!(val & rst_mask))
3740 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3743 rst_mask |= RSTCTRL_FE;
3744 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3747 val = mtk_hw_reset_read(eth);
3749 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
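/* Heuristic DMA-hang detection used by the reset monitor below: WDMA, QDMA and
 * ADMA state is sampled, and a reset is only requested once the same stall
 * signature has been observed on three consecutive polls.
 */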
3753 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3755 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3756 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3757 bool oq_hang, cdm1_busy, adma_busy;
3758 bool wtx_busy, cdm_full, oq_free;
3759 u32 wdidx, val, gdm1_fc, gdm2_fc;
3760 bool qfsm_hang, qfwd_hang;
3763 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3766 /* WDMA sanity checks */
3767 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3769 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3770 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3772 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3773 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3775 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3776 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3777 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3779 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3780 if (++eth->reset.wdma_hang_count > 2) {
3781 eth->reset.wdma_hang_count = 0;
3787 /* QDMA sanity checks */
3788 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3789 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3791 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3792 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3793 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3794 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3795 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3796 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3798 if (qfsm_hang && qfwd_hang &&
3799 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3800 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3801 if (++eth->reset.qdma_hang_count > 2) {
3802 eth->reset.qdma_hang_count = 0;
3808 /* ADMA sanity checks */
3809 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3810 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3811 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3812 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3814 if (oq_hang && cdm1_busy && adma_busy) {
3815 if (++eth->reset.adma_hang_count > 2) {
3816 eth->reset.adma_hang_count = 0;
3822 eth->reset.wdma_hang_count = 0;
3823 eth->reset.qdma_hang_count = 0;
3824 eth->reset.adma_hang_count = 0;
3826 eth->reset.wdidx = wdidx;
3831 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3833 struct delayed_work *del_work = to_delayed_work(work);
3834 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3835 reset.monitor_work);
3837 if (test_bit(MTK_RESETTING, &eth->state))
3840 /* DMA stuck checks */
3841 if (mtk_hw_check_dma_hang(eth))
3842 schedule_work(&eth->pending_work);
3845 schedule_delayed_work(&eth->reset.monitor_work,
3846 MTK_DMA_MONITOR_TIMEOUT);
3849 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3851 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3852 ETHSYS_DMA_AG_MAP_PPE;
3853 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3856 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3860 pm_runtime_enable(eth->dev);
3861 pm_runtime_get_sync(eth->dev);
3863 ret = mtk_clk_enable(eth);
3865 goto err_disable_pm;
3869 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3870 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3872 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3873 ret = device_reset(eth->dev);
3875 dev_err(eth->dev, "MAC reset failed!\n");
3876 goto err_disable_pm;
3879 /* set interrupt delays based on current Net DIM sample */
3880 mtk_dim_rx(&eth->rx_dim.work);
3881 mtk_dim_tx(&eth->tx_dim.work);
3883 /* disable delay and normal interrupt */
3884 mtk_tx_irq_disable(eth, ~0);
3885 mtk_rx_irq_disable(eth, ~0);
3893 mtk_hw_warm_reset(eth);
3897 if (mtk_is_netsys_v2_or_greater(eth)) {
3898 /* Set FE to PDMAv2 if necessary */
3899 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3900 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3904 /* Set GE2 driving and slew rate */
3905 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3908 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3911 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3914 /* Set link-down as the default for each GMAC. Its own MCR is set up
3915 * with a more appropriate value when mtk_mac_config() is invoked. */
3918 for (i = 0; i < MTK_MAX_DEVS; i++) {
3919 struct net_device *dev = eth->netdev[i];
3924 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3925 mtk_set_mcr_max_rx(netdev_priv(dev),
3926 dev->mtu + MTK_RX_ETH_HLEN);
3929 /* Instruct the CDM to parse the MTK special tag on frames coming from
3930 * the CPU; this also works for untagged packets. */
3932 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3933 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3934 if (mtk_is_netsys_v1(eth)) {
3935 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3936 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3938 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3941 /* set interrupt delays based on current Net DIM sample */
3942 mtk_dim_rx(&eth->rx_dim.work);
3943 mtk_dim_tx(&eth->tx_dim.work);
3945 /* disable delay and normal interrupt */
3946 mtk_tx_irq_disable(eth, ~0);
3947 mtk_rx_irq_disable(eth, ~0);
3949 /* FE int grouping */
3950 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3951 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3952 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3953 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3954 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3956 if (mtk_is_netsys_v3_or_greater(eth)) {
3957 /* PSE should not drop port1, port8 and port9 packets */
3958 mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3960 /* GDM and CDM Threshold */
3961 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3962 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3964 /* Disable GDM1 RX CRC stripping */
3965 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
3967 /* The PSE GDM3 MIB counters have incorrect hardware default values, so
3968 * the driver must read-clear them beforehand to keep ethtool from
3969 * retrieving wrong MIB values. */
3971 for (i = 0; i < 0x80; i += 0x4)
3972 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
3973 } else if (!mtk_is_netsys_v1(eth)) {
3974 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3975 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3977 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3978 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3980 /* PSE Free Queue Flow Control */
3981 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3983 /* PSE config input queue threshold */
3984 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3985 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3986 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3987 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3988 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3989 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3990 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3991 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3993 /* PSE config output queue threshold */
3994 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3995 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3996 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3997 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3998 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3999 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4000 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4001 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4003 /* GDM and CDM Threshold */
4004 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4005 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4006 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4007 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4008 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4009 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4016 pm_runtime_put_sync(eth->dev);
4017 pm_runtime_disable(eth->dev);
4023 static int mtk_hw_deinit(struct mtk_eth *eth)
4025 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4028 mtk_clk_disable(eth);
4030 pm_runtime_put_sync(eth->dev);
4031 pm_runtime_disable(eth->dev);
4036 static void mtk_uninit(struct net_device *dev)
4038 struct mtk_mac *mac = netdev_priv(dev);
4039 struct mtk_eth *eth = mac->hw;
4041 phylink_disconnect_phy(mac->phylink);
4042 mtk_tx_irq_disable(eth, ~0);
4043 mtk_rx_irq_disable(eth, ~0);
4046 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4048 int length = new_mtu + MTK_RX_ETH_HLEN;
4049 struct mtk_mac *mac = netdev_priv(dev);
4050 struct mtk_eth *eth = mac->hw;
4052 if (rcu_access_pointer(eth->prog) &&
4053 length > MTK_PP_MAX_BUF_SIZE) {
4054 netdev_err(dev, "Invalid MTU for XDP mode\n");
4058 mtk_set_mcr_max_rx(mac, length);
4064 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4066 struct mtk_mac *mac = netdev_priv(dev);
4072 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4080 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4085 /* set FE PPE ports link down */
4086 for (i = MTK_GMAC1_ID;
4087 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4089 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4090 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4091 val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4092 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4093 val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4094 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4097 /* adjust PPE configurations to prepare for reset */
4098 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4099 mtk_ppe_prepare_reset(eth->ppe[i]);
4101 /* disable NETSYS interrupts */
4102 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4104 /* force link down GMAC */
4105 for (i = 0; i < 2; i++) {
4106 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4107 mtk_w32(eth, val, MTK_MAC_MCR(i));
4111 static void mtk_pending_work(struct work_struct *work)
4113 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4114 unsigned long restart = 0;
4119 set_bit(MTK_RESETTING, &eth->state);
4121 mtk_prepare_for_reset(eth);
4123 /* Run the reset preliminary configuration again in order to avoid any
4124 * possible race during the FE reset, since it can run with the RTNL lock released. */
4126 mtk_prepare_for_reset(eth);
4128 /* stop all devices to make sure that dma is properly shut down */
4129 for (i = 0; i < MTK_MAX_DEVS; i++) {
4130 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4133 mtk_stop(eth->netdev[i]);
4134 __set_bit(i, &restart);
4137 usleep_range(15000, 16000);
4140 pinctrl_select_state(eth->dev->pins->p,
4141 eth->dev->pins->default_state);
4142 mtk_hw_init(eth, true);
4144 /* restart DMA and enable IRQs */
4145 for (i = 0; i < MTK_MAX_DEVS; i++) {
4146 if (!eth->netdev[i] || !test_bit(i, &restart))
4149 if (mtk_open(eth->netdev[i])) {
4150 netif_alert(eth, ifup, eth->netdev[i],
4151 "Driver up/down cycle failed\n");
4152 dev_close(eth->netdev[i]);
4156 /* set FE PPE ports link up */
4157 for (i = MTK_GMAC1_ID;
4158 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4160 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4161 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4162 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4163 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4164 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4166 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4169 clear_bit(MTK_RESETTING, &eth->state);
4171 mtk_wed_fe_reset_complete();
4176 static int mtk_free_dev(struct mtk_eth *eth)
4180 for (i = 0; i < MTK_MAX_DEVS; i++) {
4181 if (!eth->netdev[i])
4183 free_netdev(eth->netdev[i]);
4186 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4187 if (!eth->dsa_meta[i])
4189 metadata_dst_free(eth->dsa_meta[i]);
4195 static int mtk_unreg_dev(struct mtk_eth *eth)
4199 for (i = 0; i < MTK_MAX_DEVS; i++) {
4200 struct mtk_mac *mac;
4201 if (!eth->netdev[i])
4203 mac = netdev_priv(eth->netdev[i]);
4204 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4205 unregister_netdevice_notifier(&mac->device_notifier);
4206 unregister_netdev(eth->netdev[i]);
4212 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4216 for (i = 0; i < MTK_MAX_DEVS; i++)
4217 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4220 static int mtk_cleanup(struct mtk_eth *eth)
4222 mtk_sgmii_destroy(eth);
4225 cancel_work_sync(&eth->pending_work);
4226 cancel_delayed_work_sync(&eth->reset.monitor_work);
4231 static int mtk_get_link_ksettings(struct net_device *ndev,
4232 struct ethtool_link_ksettings *cmd)
4234 struct mtk_mac *mac = netdev_priv(ndev);
4236 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4239 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4242 static int mtk_set_link_ksettings(struct net_device *ndev,
4243 const struct ethtool_link_ksettings *cmd)
4245 struct mtk_mac *mac = netdev_priv(ndev);
4247 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4250 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4253 static void mtk_get_drvinfo(struct net_device *dev,
4254 struct ethtool_drvinfo *info)
4256 struct mtk_mac *mac = netdev_priv(dev);
4258 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4259 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4260 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4263 static u32 mtk_get_msglevel(struct net_device *dev)
4265 struct mtk_mac *mac = netdev_priv(dev);
4267 return mac->hw->msg_enable;
4270 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4272 struct mtk_mac *mac = netdev_priv(dev);
4274 mac->hw->msg_enable = value;
4277 static int mtk_nway_reset(struct net_device *dev)
4279 struct mtk_mac *mac = netdev_priv(dev);
4281 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4287 return phylink_ethtool_nway_reset(mac->phylink);
4290 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4294 switch (stringset) {
4295 case ETH_SS_STATS: {
4296 struct mtk_mac *mac = netdev_priv(dev);
4298 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4299 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4300 data += ETH_GSTRING_LEN;
4302 if (mtk_page_pool_enabled(mac->hw))
4303 page_pool_ethtool_stats_get_strings(data);
4311 static int mtk_get_sset_count(struct net_device *dev, int sset)
4314 case ETH_SS_STATS: {
4315 int count = ARRAY_SIZE(mtk_ethtool_stats);
4316 struct mtk_mac *mac = netdev_priv(dev);
4318 if (mtk_page_pool_enabled(mac->hw))
4319 count += page_pool_ethtool_stats_get_count();
4327 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4329 struct page_pool_stats stats = {};
4332 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4333 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4335 if (!ring->page_pool)
4338 page_pool_get_stats(ring->page_pool, &stats);
4340 page_pool_ethtool_stats_get(data, &stats);
4343 static void mtk_get_ethtool_stats(struct net_device *dev,
4344 struct ethtool_stats *stats, u64 *data)
4346 struct mtk_mac *mac = netdev_priv(dev);
4347 struct mtk_hw_stats *hwstats = mac->hw_stats;
4348 u64 *data_src, *data_dst;
4352 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4355 if (netif_running(dev) && netif_device_present(dev)) {
4356 if (spin_trylock_bh(&hwstats->stats_lock)) {
4357 mtk_stats_update_mac(mac);
4358 spin_unlock_bh(&hwstats->stats_lock);
4362 data_src = (u64 *)hwstats;
4366 start = u64_stats_fetch_begin(&hwstats->syncp);
4368 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4369 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4370 if (mtk_page_pool_enabled(mac->hw))
4371 mtk_ethtool_pp_stats(mac->hw, data_dst);
4372 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4375 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4378 int ret = -EOPNOTSUPP;
4381 case ETHTOOL_GRXRINGS:
4382 if (dev->hw_features & NETIF_F_LRO) {
4383 cmd->data = MTK_MAX_RX_RING_NUM;
4387 case ETHTOOL_GRXCLSRLCNT:
4388 if (dev->hw_features & NETIF_F_LRO) {
4389 struct mtk_mac *mac = netdev_priv(dev);
4391 cmd->rule_cnt = mac->hwlro_ip_cnt;
4395 case ETHTOOL_GRXCLSRULE:
4396 if (dev->hw_features & NETIF_F_LRO)
4397 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4399 case ETHTOOL_GRXCLSRLALL:
4400 if (dev->hw_features & NETIF_F_LRO)
4401 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4411 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4413 int ret = -EOPNOTSUPP;
4416 case ETHTOOL_SRXCLSRLINS:
4417 if (dev->hw_features & NETIF_F_LRO)
4418 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4420 case ETHTOOL_SRXCLSRLDEL:
4421 if (dev->hw_features & NETIF_F_LRO)
4422 ret = mtk_hwlro_del_ipaddr(dev, cmd);
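/* DSA-tagged frames are steered to the per-switch-port TX queues starting at
 * index 3 (matching mtk_device_event() above); anything else, or an
 * out-of-range mapping, falls back to queue 0.
 */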
4431 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4432 struct net_device *sb_dev)
4434 struct mtk_mac *mac = netdev_priv(dev);
4435 unsigned int queue = 0;
4437 if (netdev_uses_dsa(dev))
4438 queue = skb_get_queue_mapping(skb) + 3;
4442 if (queue >= dev->num_tx_queues)
4448 static const struct ethtool_ops mtk_ethtool_ops = {
4449 .get_link_ksettings = mtk_get_link_ksettings,
4450 .set_link_ksettings = mtk_set_link_ksettings,
4451 .get_drvinfo = mtk_get_drvinfo,
4452 .get_msglevel = mtk_get_msglevel,
4453 .set_msglevel = mtk_set_msglevel,
4454 .nway_reset = mtk_nway_reset,
4455 .get_link = ethtool_op_get_link,
4456 .get_strings = mtk_get_strings,
4457 .get_sset_count = mtk_get_sset_count,
4458 .get_ethtool_stats = mtk_get_ethtool_stats,
4459 .get_rxnfc = mtk_get_rxnfc,
4460 .set_rxnfc = mtk_set_rxnfc,
4463 static const struct net_device_ops mtk_netdev_ops = {
4464 .ndo_uninit = mtk_uninit,
4465 .ndo_open = mtk_open,
4466 .ndo_stop = mtk_stop,
4467 .ndo_start_xmit = mtk_start_xmit,
4468 .ndo_set_mac_address = mtk_set_mac_address,
4469 .ndo_validate_addr = eth_validate_addr,
4470 .ndo_eth_ioctl = mtk_do_ioctl,
4471 .ndo_change_mtu = mtk_change_mtu,
4472 .ndo_tx_timeout = mtk_tx_timeout,
4473 .ndo_get_stats64 = mtk_get_stats64,
4474 .ndo_fix_features = mtk_fix_features,
4475 .ndo_set_features = mtk_set_features,
4476 #ifdef CONFIG_NET_POLL_CONTROLLER
4477 .ndo_poll_controller = mtk_poll_controller,
4479 .ndo_setup_tc = mtk_eth_setup_tc,
4481 .ndo_xdp_xmit = mtk_xdp_xmit,
4482 .ndo_select_queue = mtk_select_queue,
4485 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4487 const __be32 *_id = of_get_property(np, "reg", NULL);
4488 phy_interface_t phy_mode;
4489 struct phylink *phylink;
4490 struct mtk_mac *mac;
4496 dev_err(eth->dev, "missing mac id\n");
4500 id = be32_to_cpup(_id);
4501 if (id >= MTK_MAX_DEVS) {
4502 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4506 if (eth->netdev[id]) {
4507 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4511 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4512 txqs = MTK_QDMA_NUM_QUEUES;
4514 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4515 if (!eth->netdev[id]) {
4516 dev_err(eth->dev, "alloc_etherdev failed\n");
4519 mac = netdev_priv(eth->netdev[id]);
4525 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4526 if (err == -EPROBE_DEFER)
4530 /* If the mac address is invalid, use random mac address */
4531 eth_hw_addr_random(eth->netdev[id]);
4532 dev_err(eth->dev, "generated random MAC address %pM\n",
4533 eth->netdev[id]->dev_addr);
4536 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4537 mac->hwlro_ip_cnt = 0;
4539 mac->hw_stats = devm_kzalloc(eth->dev,
4540 sizeof(*mac->hw_stats),
4542 if (!mac->hw_stats) {
4543 dev_err(eth->dev, "failed to allocate counter memory\n");
4547 spin_lock_init(&mac->hw_stats->stats_lock);
4548 u64_stats_init(&mac->hw_stats->syncp);
4550 if (mtk_is_netsys_v3_or_greater(eth))
4551 mac->hw_stats->reg_offset = id * 0x80;
4553 mac->hw_stats->reg_offset = id * 0x40;
4555 /* phylink create */
4556 err = of_get_phy_mode(np, &phy_mode);
4558 dev_err(eth->dev, "incorrect phy-mode\n");
4562 /* mac config is not set */
4563 mac->interface = PHY_INTERFACE_MODE_NA;
4564 mac->speed = SPEED_UNKNOWN;
4566 mac->phylink_config.dev = &eth->netdev[id]->dev;
4567 mac->phylink_config.type = PHYLINK_NETDEV;
4568 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4569 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4571 /* MT7623 gmac0 is currently missing its speed-specific PLL configuration
4572 * in its .mac_config method (state->speed is not valid there), so disable
4573 * support for MII, GMII and RGMII. */
4575 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4576 __set_bit(PHY_INTERFACE_MODE_MII,
4577 mac->phylink_config.supported_interfaces);
4578 __set_bit(PHY_INTERFACE_MODE_GMII,
4579 mac->phylink_config.supported_interfaces);
4581 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4582 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4585 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4586 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4587 mac->phylink_config.supported_interfaces);
4589 /* TRGMII is not permitted on MT7621 if using DDR2 */
4590 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4591 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4592 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4593 if (val & SYSCFG_DRAM_TYPE_DDR2)
4594 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4595 mac->phylink_config.supported_interfaces);
4598 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4599 __set_bit(PHY_INTERFACE_MODE_SGMII,
4600 mac->phylink_config.supported_interfaces);
4601 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4602 mac->phylink_config.supported_interfaces);
4603 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4604 mac->phylink_config.supported_interfaces);
4607 if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4608 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4609 id == MTK_GMAC1_ID) {
4610 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4613 phy_interface_zero(mac->phylink_config.supported_interfaces);
4614 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4615 mac->phylink_config.supported_interfaces);
4618 phylink = phylink_create(&mac->phylink_config,
4619 of_fwnode_handle(mac->of_node),
4620 phy_mode, &mtk_phylink_ops);
4621 if (IS_ERR(phylink)) {
4622 err = PTR_ERR(phylink);
4626 mac->phylink = phylink;
4628 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4629 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4630 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4631 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4633 eth->netdev[id]->hw_features = eth->soc->hw_features;
4635 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4637 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4638 ~NETIF_F_HW_VLAN_CTAG_TX;
4639 eth->netdev[id]->features |= eth->soc->hw_features;
4640 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4642 eth->netdev[id]->irq = eth->irq[0];
4643 eth->netdev[id]->dev.of_node = np;
4645 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4646 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4648 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4650 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4651 mac->device_notifier.notifier_call = mtk_device_event;
4652 register_netdevice_notifier(&mac->device_notifier);
4655 if (mtk_page_pool_enabled(eth))
4656 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4657 NETDEV_XDP_ACT_REDIRECT |
4658 NETDEV_XDP_ACT_NDO_XMIT |
4659 NETDEV_XDP_ACT_NDO_XMIT_SG;
4664 free_netdev(eth->netdev[id]);
4668 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4670 struct net_device *dev, *tmp;
4671 LIST_HEAD(dev_list);
4676 for (i = 0; i < MTK_MAX_DEVS; i++) {
4677 dev = eth->netdev[i];
4679 if (!dev || !(dev->flags & IFF_UP))
4682 list_add_tail(&dev->close_list, &dev_list);
4685 dev_close_many(&dev_list, false);
4687 eth->dma_dev = dma_dev;
4689 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4690 list_del_init(&dev->close_list);
4691 dev_open(dev, NULL);
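/* Look up the "mediatek,sgmiisys" syscon phandles and create one LynxI PCS
 * instance per MAC, honouring the optional "mediatek,pnswap" property that
 * swaps the SerDes P/N polarity.
 */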
4697 static int mtk_sgmii_init(struct mtk_eth *eth)
4699 struct device_node *np;
4700 struct regmap *regmap;
4704 for (i = 0; i < MTK_MAX_DEVS; i++) {
4705 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4709 regmap = syscon_node_to_regmap(np);
4711 if (of_property_read_bool(np, "mediatek,pnswap"))
4712 flags |= MTK_SGMII_FLAG_PN_SWAP;
4717 return PTR_ERR(regmap);
4719 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4727 static int mtk_probe(struct platform_device *pdev)
4729 struct resource *res = NULL, *res_sram;
4730 struct device_node *mac_np;
4731 struct mtk_eth *eth;
4734 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4738 eth->soc = of_device_get_match_data(&pdev->dev);
4740 eth->dev = &pdev->dev;
4741 eth->dma_dev = &pdev->dev;
4742 eth->base = devm_platform_ioremap_resource(pdev, 0);
4743 if (IS_ERR(eth->base))
4744 return PTR_ERR(eth->base);
4746 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4747 eth->ip_align = NET_IP_ALIGN;
4749 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4750 /* SRAM is actual memory and supports transparent access just like DRAM.
4751 * Hence we don't require __iomem being set and don't need to use accessor
4752 * functions to read from or write to SRAM.
4754 if (mtk_is_netsys_v3_or_greater(eth)) {
4755 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4756 if (IS_ERR(eth->sram_base))
4757 return PTR_ERR(eth->sram_base);
4759 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4763 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4764 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4766 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4769 dev_err(&pdev->dev, "Wrong DMA config\n");
4774 spin_lock_init(&eth->page_lock);
4775 spin_lock_init(&eth->tx_irq_lock);
4776 spin_lock_init(&eth->rx_irq_lock);
4777 spin_lock_init(&eth->dim_lock);
4779 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4780 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4781 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4783 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4784 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4786 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4787 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4789 if (IS_ERR(eth->ethsys)) {
4790 dev_err(&pdev->dev, "no ethsys regmap found\n");
4791 return PTR_ERR(eth->ethsys);
4795 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4796 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4797 "mediatek,infracfg");
4798 if (IS_ERR(eth->infra)) {
4799 dev_err(&pdev->dev, "no infracfg regmap found\n");
4800 return PTR_ERR(eth->infra);
4804 if (of_dma_is_coherent(pdev->dev.of_node)) {
4807 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4808 "cci-control-port");
4809 /* enable CPU/bus coherency */
4811 regmap_write(cci, 0, 3);
4814 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4815 err = mtk_sgmii_init(eth);
4821 if (eth->soc->required_pctl) {
4822 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4824 if (IS_ERR(eth->pctl)) {
4825 dev_err(&pdev->dev, "no pctl regmap found\n");
4826 err = PTR_ERR(eth->pctl);
4827 goto err_destroy_sgmii;
4831 if (mtk_is_netsys_v2_or_greater(eth)) {
4832 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4835 goto err_destroy_sgmii;
4837 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4838 if (mtk_is_netsys_v3_or_greater(eth)) {
4839 res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4842 goto err_destroy_sgmii;
4844 eth->phy_scratch_ring = res_sram->start;
4846 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4851 if (eth->soc->offload_version) {
4853 struct device_node *np;
4854 phys_addr_t wdma_phy;
4857 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4860 np = of_parse_phandle(pdev->dev.of_node,
4865 wdma_base = eth->soc->reg_map->wdma_base[i];
4866 wdma_phy = res ? res->start + wdma_base : 0;
4867 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4872 for (i = 0; i < 3; i++) {
4873 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4874 eth->irq[i] = eth->irq[0];
4876 eth->irq[i] = platform_get_irq(pdev, i);
4877 if (eth->irq[i] < 0) {
4878 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4883 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4884 eth->clks[i] = devm_clk_get(eth->dev,
4885 mtk_clks_source_name[i]);
4886 if (IS_ERR(eth->clks[i])) {
4887 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4888 err = -EPROBE_DEFER;
4891 if (eth->soc->required_clks & BIT(i)) {
4892 dev_err(&pdev->dev, "clock %s not found\n",
4893 mtk_clks_source_name[i]);
4897 eth->clks[i] = NULL;
4901 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4902 INIT_WORK(&eth->pending_work, mtk_pending_work);
4904 err = mtk_hw_init(eth, false);
4908 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4910 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4911 if (!of_device_is_compatible(mac_np,
4912 "mediatek,eth-mac"))
4915 if (!of_device_is_available(mac_np))
4918 err = mtk_add_mac(eth, mac_np);
4920 of_node_put(mac_np);
4925 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4926 err = devm_request_irq(eth->dev, eth->irq[0],
4928 dev_name(eth->dev), eth);
4930 err = devm_request_irq(eth->dev, eth->irq[1],
4931 mtk_handle_irq_tx, 0,
4932 dev_name(eth->dev), eth);
4936 err = devm_request_irq(eth->dev, eth->irq[2],
4937 mtk_handle_irq_rx, 0,
4938 dev_name(eth->dev), eth);
4943 /* No MT7628/88 support yet */
4944 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4945 err = mtk_mdio_init(eth);
4950 if (eth->soc->offload_version) {
4951 u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
4953 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4954 for (i = 0; i < num_ppe; i++) {
4955 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4957 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
4961 goto err_deinit_ppe;
4965 err = mtk_eth_offload_init(eth);
4967 goto err_deinit_ppe;
4970 for (i = 0; i < MTK_MAX_DEVS; i++) {
4971 if (!eth->netdev[i])
4974 err = register_netdev(eth->netdev[i]);
4976 dev_err(eth->dev, "error bringing up device\n");
4977 goto err_deinit_ppe;
4979 netif_info(eth, probe, eth->netdev[i],
4980 "mediatek frame engine at 0x%08lx, irq %d\n",
4981 eth->netdev[i]->base_addr, eth->irq[0]);
4984 /* we run 2 devices on the same DMA ring so we need a dummy device for NAPI to work */
4987 init_dummy_netdev(&eth->dummy_dev);
4988 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4989 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4991 platform_set_drvdata(pdev, eth);
4992 schedule_delayed_work(&eth->reset.monitor_work,
4993 MTK_DMA_MONITOR_TIMEOUT);
4998 mtk_ppe_deinit(eth);
4999 mtk_mdio_cleanup(eth);
5007 mtk_sgmii_destroy(eth);
5012 static void mtk_remove(struct platform_device *pdev)
5014 struct mtk_eth *eth = platform_get_drvdata(pdev);
5015 struct mtk_mac *mac;
5018 /* stop all devices to make sure that dma is properly shut down */
5019 for (i = 0; i < MTK_MAX_DEVS; i++) {
5020 if (!eth->netdev[i])
5022 mtk_stop(eth->netdev[i]);
5023 mac = netdev_priv(eth->netdev[i]);
5024 phylink_disconnect_phy(mac->phylink);
5030 netif_napi_del(&eth->tx_napi);
5031 netif_napi_del(&eth->rx_napi);
5033 mtk_mdio_cleanup(eth);
static const struct mtk_soc_data mt2701_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.version = 1,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

static const struct mtk_soc_data mt7621_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.offload_version = 1,
	.hash_offset = 2,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

static const struct mtk_soc_data mt7622_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.offload_version = 2,
	.hash_offset = 2,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

static const struct mtk_soc_data mt7623_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.version = 1,
	.offload_version = 1,
	.hash_offset = 2,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.disable_pll_modes = true,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

static const struct mtk_soc_data mt7629_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
	.has_accounting = true,
	.version = 1,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

static const struct mtk_soc_data mt7981_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7981_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7981_CLKS_BITMAP,
	.required_pctl = false,
	.version = 2,
	.offload_version = 2,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
	},
};

static const struct mtk_soc_data mt7986_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7986_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7986_CLKS_BITMAP,
	.required_pctl = false,
	.version = 2,
	.offload_version = 2,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
	},
};

static const struct mtk_soc_data mt7988_data = {
	.reg_map = &mt7988_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7988_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7988_CLKS_BITMAP,
	.required_pctl = false,
	.version = 3,
	.offload_version = 2,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma_v2),
		.rxd_size = sizeof(struct mtk_rx_dma_v2),
		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
	},
};

static const struct mtk_soc_data rt5350_data = {
	.reg_map = &mt7628_reg_map,
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.txrx = {
		.txd_size = sizeof(struct mtk_tx_dma),
		.rxd_size = sizeof(struct mtk_rx_dma),
		.rx_irq_done_mask = MTK_RX_DONE_INT,
		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
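
/* Supporting an additional SoC generally means adding another mtk_soc_data
 * definition above plus a matching entry in of_mtk_match, along the lines of
 * (purely illustrative, not a real compatible string):
 *
 *	{ .compatible = "mediatek,mtXXXX-eth", .data = &mtXXXX_data },
 */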

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove_new = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");