Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
author     David S. Miller <davem@davemloft.net>
           Tue, 17 Sep 2019 21:51:10 +0000 (23:51 +0200)
committer  David S. Miller <davem@davemloft.net>
           Tue, 17 Sep 2019 21:51:10 +0000 (23:51 +0200)
Pull in bug fixes from 'net' tree for the merge window.

Signed-off-by: David S. Miller <davem@davemloft.net>
MAINTAINERS
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/xen-netfront.c
include/net/pkt_sched.h
net/core/dev.c
net/dsa/dsa2.c
net/ipv4/udp.c
net/ipv6/udp.c

diff --combined MAINTAINERS
index 32f4f05fb3da8b608066ea3d11af191275a21f56,ceeb7ceae041c5e32f722db6a802d1ccf7d4a2c1..dd39fc578607ab34f2c0159d53dc8d07e3f3275c
@@@ -649,6 -649,12 +649,12 @@@ M:       Lino Sanfilippo <LinoSanfilippo@gmx.
  S:    Maintained
  F:    drivers/net/ethernet/alacritech/*
  
+ FORCEDETH GIGABIT ETHERNET DRIVER
+ M:    Rain River <rain.1986.08.12@gmail.com>
+ L:    netdev@vger.kernel.org
+ S:    Maintained
+ F:    drivers/net/ethernet/nvidia/*
+
  ALCATEL SPEEDTOUCH USB DRIVER
  M:    Duncan Sands <duncan.sands@free.fr>
  L:    linux-usb@vger.kernel.org
@@@ -938,14 -944,6 +944,14 @@@ S:       Supporte
  F:    drivers/mux/adgs1408.c
  F:    Documentation/devicetree/bindings/mux/adi,adgs1408.txt
  
 +ANALOG DEVICES INC ADIN DRIVER
 +M:    Alexandru Ardelean <alexandru.ardelean@analog.com>
 +L:    netdev@vger.kernel.org
 +W:    http://ez.analog.com/community/linux-device-drivers
 +S:    Supported
 +F:    drivers/net/phy/adin.c
 +F:    Documentation/devicetree/bindings/net/adi,adin.yaml
 +
  ANALOG DEVICES INC ADIS DRIVER LIBRARY
  M:    Alexandru Ardelean <alexandru.ardelean@analog.com>
  S:    Supported
@@@ -2923,7 -2921,6 +2929,7 @@@ BATMAN ADVANCE
  M:    Marek Lindner <mareklindner@neomailbox.ch>
  M:    Simon Wunderlich <sw@simonwunderlich.de>
  M:    Antonio Quartulli <a@unstable.cc>
 +M:    Sven Eckelmann <sven@narfation.org>
  L:    b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
  W:    https://www.open-mesh.org/
  B:    https://www.open-mesh.org/projects/batman-adv/issues
@@@ -3644,12 -3641,9 +3650,12 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/net/can/
  F:    drivers/net/can/
  F:    include/linux/can/dev.h
 +F:    include/linux/can/led.h
 +F:    include/linux/can/rx-offload.h
  F:    include/linux/can/platform/
  F:    include/uapi/linux/can/error.h
  F:    include/uapi/linux/can/netlink.h
 +F:    include/uapi/linux/can/vxcan.h
  
  CAN NETWORK LAYER
  M:    Oliver Hartkopp <socketcan@hartkopp.net>
@@@ -3662,23 -3656,11 +3668,23 @@@ S:   Maintaine
  F:    Documentation/networking/can.rst
  F:    net/can/
  F:    include/linux/can/core.h
 +F:    include/linux/can/skb.h
 +F:    include/net/netns/can.h
  F:    include/uapi/linux/can.h
  F:    include/uapi/linux/can/bcm.h
  F:    include/uapi/linux/can/raw.h
  F:    include/uapi/linux/can/gw.h
  
 +CAN-J1939 NETWORK LAYER
 +M:    Robin van der Gracht <robin@protonic.nl>
 +M:    Oleksij Rempel <o.rempel@pengutronix.de>
 +R:    Pengutronix Kernel Team <kernel@pengutronix.de>
 +L:    linux-can@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/networking/j1939.txt
 +F:    net/can/j1939/
 +F:    include/uapi/linux/can/j1939.h
 +
  CAPABILITIES
  M:    Serge Hallyn <serge@hallyn.com>
  L:    linux-security-module@vger.kernel.org
@@@ -5582,6 -5564,12 +5588,6 @@@ T:     git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/radio/dsbr100.c
  
 -DSCC4 DRIVER
 -M:    Francois Romieu <romieu@fr.zoreil.com>
 -L:    netdev@vger.kernel.org
 -S:    Maintained
 -F:    drivers/net/wan/dscc4.c
 -
  DT3155 MEDIA DRIVER
  M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
@@@ -7474,7 -7462,6 +7480,7 @@@ F:      drivers/hid/hid-hyperv.
  F:    drivers/hv/
  F:    drivers/input/serio/hyperv-keyboard.c
  F:    drivers/pci/controller/pci-hyperv.c
 +F:    drivers/pci/controller/pci-hyperv-intf.c
  F:    drivers/net/hyperv/
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/uio/uio_hv_generic.c
@@@ -11185,7 -11172,6 +11191,7 @@@ S:   Maintaine
  W:    https://fedorahosted.org/dropwatch/
  F:    net/core/drop_monitor.c
  F:    include/uapi/linux/net_dropmon.h
 +F:    include/net/drop_monitor.h
  
  NETWORKING DRIVERS
  M:    "David S. Miller" <davem@davemloft.net>
@@@ -11365,6 -11351,7 +11371,6 @@@ F:   include/net/nfc
  F:    include/uapi/linux/nfc.h
  F:    drivers/nfc/
  F:    include/linux/platform_data/nfcmrvl.h
 -F:    include/linux/platform_data/nxp-nci.h
  F:    Documentation/devicetree/bindings/net/nfc/
  
  NFS, SUNRPC, AND LOCKD CLIENTS
@@@ -12612,14 -12599,6 +12618,14 @@@ L: platform-driver-x86@vger.kernel.or
  S:    Maintained
  F:    drivers/platform/x86/peaq-wmi.c
  
 +PENSANDO ETHERNET DRIVERS
 +M:    Shannon Nelson <snelson@pensando.io>
 +M:    Pensando Drivers <drivers@pensando.io>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    Documentation/networking/device_drivers/pensando/ionic.rst
 +F:    drivers/net/ethernet/pensando/
 +
  PER-CPU MEMORY ALLOCATOR
  M:    Dennis Zhou <dennis@kernel.org>
  M:    Tejun Heo <tj@kernel.org>
@@@ -13267,7 -13246,7 +13273,7 @@@ M:   Manish Chopra <manishc@marvell.com
  M:    GR-Linux-NIC-Dev@marvell.com
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/ethernet/qlogic/qlge/
 +F:    drivers/staging/qlge/
  
  QM1D1B0004 MEDIA DRIVER
  M:    Akihiro Tsukada <tskd08@gmail.com>
@@@ -17673,7 -17652,7 +17679,7 @@@ F:   Documentation/ABI/testing/sysfs-hype
  
  XEN NETWORK BACKEND DRIVER
  M:    Wei Liu <wei.liu@kernel.org>
- M:    Paul Durrant <paul.durrant@citrix.com>
+ M:    Paul Durrant <paul@xen.org>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
  L:    netdev@vger.kernel.org
  S:    Supported
diff --combined drivers/net/ethernet/amazon/ena/ena_netdev.c
index e4bf7a4af87ac2446a51d5fa12836a499a453313,d118ed4c57ced4c0d0ffbd7083beddb08665ffdb..c487d2a7d6dd04cfcc0c5693a6f0692a00753cd9
@@@ -158,6 -158,7 +158,6 @@@ static void ena_init_io_rings_common(st
        ring->adapter = adapter;
        ring->ena_dev = adapter->ena_dev;
        ring->per_napi_packets = 0;
 -      ring->per_napi_bytes = 0;
        ring->cpu = 0;
        ring->first_interrupt = false;
        ring->no_interrupt_event_cnt = 0;
@@@ -195,7 -196,6 +195,7 @@@ static void ena_init_io_rings(struct en
                rxr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
                rxr->empty_rx_queue = 0;
 +              adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
        }
  }
  
@@@ -712,7 -712,6 +712,7 @@@ static void ena_destroy_all_rx_queues(s
  
        for (i = 0; i < adapter->num_queues; i++) {
                ena_qid = ENA_IO_RXQ_IDX(i);
 +              cancel_work_sync(&adapter->ena_napi[i].dim.work);
                ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
        }
  }
@@@ -824,7 -823,8 +824,8 @@@ static int ena_clean_tx_irq(struct ena_
                above_thresh =
                        ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
                                                     ENA_TX_WAKEUP_THRESH);
-               if (netif_tx_queue_stopped(txq) && above_thresh) {
+               if (netif_tx_queue_stopped(txq) && above_thresh &&
+                   test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
                __netif_tx_unlock(txq);
        }
  
 -      tx_ring->per_napi_bytes += tx_bytes;
 -      tx_ring->per_napi_packets += tx_pkts;
 -
        return tx_pkts;
  }
  
@@@ -1116,6 -1119,7 +1117,6 @@@ static int ena_clean_rx_irq(struct ena_
        } while (likely(res_budget));
  
        work_done = budget - res_budget;
 -      rx_ring->per_napi_bytes += total_len;
        rx_ring->per_napi_packets += work_done;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bytes += total_len;
@@@ -1152,50 -1156,35 +1153,50 @@@ error
        return 0;
  }
  
 -void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 -                                     struct ena_ring *tx_ring)
 +static void ena_dim_work(struct work_struct *w)
  {
 -      /* We apply adaptive moderation on Rx path only.
 -       * Tx uses static interrupt moderation.
 -       */
 -      ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
 -                                        rx_ring->per_napi_packets,
 -                                        rx_ring->per_napi_bytes,
 -                                        &rx_ring->smoothed_interval,
 -                                        &rx_ring->moder_tbl_idx);
 -
 -      /* Reset per napi packets/bytes */
 -      tx_ring->per_napi_packets = 0;
 -      tx_ring->per_napi_bytes = 0;
 +      struct dim *dim = container_of(w, struct dim, work);
 +      struct dim_cq_moder cur_moder =
 +              net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 +      struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
 +
 +      ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
 +      dim->state = DIM_START_MEASURE;
 +}
 +
 +static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
 +{
 +      struct dim_sample dim_sample;
 +      struct ena_ring *rx_ring = ena_napi->rx_ring;
 +
 +      if (!rx_ring->per_napi_packets)
 +              return;
 +
 +      rx_ring->non_empty_napi_events++;
 +
 +      dim_update_sample(rx_ring->non_empty_napi_events,
 +                        rx_ring->rx_stats.cnt,
 +                        rx_ring->rx_stats.bytes,
 +                        &dim_sample);
 +
 +      net_dim(&ena_napi->dim, dim_sample);
 +
        rx_ring->per_napi_packets = 0;
 -      rx_ring->per_napi_bytes = 0;
  }
  
  static void ena_unmask_interrupt(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
  {
        struct ena_eth_io_intr_reg intr_reg;
 +      u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
 +              rx_ring->smoothed_interval :
 +              ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
  
        /* Update intr register: rx intr delay,
         * tx intr delay and interrupt unmask
         */
        ena_com_update_intr_reg(&intr_reg,
 -                              rx_ring->smoothed_interval,
 +                              rx_interval,
                                tx_ring->smoothed_interval,
                                true);
  
@@@ -1272,11 -1261,9 +1273,11 @@@ static int ena_io_poll(struct napi_stru
                 * from the interrupt context (vs from sk_busy_loop)
                 */
                if (napi_complete_done(napi, rx_work_done)) {
 -                      /* Tx and Rx share the same interrupt vector */
 +                      /* We apply adaptive moderation on Rx path only.
 +                       * Tx uses static interrupt moderation.
 +                       */
                        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 -                              ena_adjust_intr_moderation(rx_ring, tx_ring);
 +                              ena_adjust_adaptive_rx_intr_moderation(ena_napi);
  
                        ena_unmask_interrupt(tx_ring, rx_ring);
                }
@@@ -1566,6 -1553,14 +1567,6 @@@ static void ena_napi_enable_all(struct 
                napi_enable(&adapter->ena_napi[i].napi);
  }
  
 -static void ena_restore_ethtool_params(struct ena_adapter *adapter)
 -{
 -      adapter->tx_usecs = 0;
 -      adapter->rx_usecs = 0;
 -      adapter->tx_frames = 1;
 -      adapter->rx_frames = 1;
 -}
 -
  /* Configure the Rx forwarding */
  static int ena_rss_configure(struct ena_adapter *adapter)
  {
@@@ -1615,6 -1610,8 +1616,6 @@@ static int ena_up_complete(struct ena_a
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
  
 -      ena_restore_ethtool_params(adapter);
 -
        ena_napi_enable_all(adapter);
  
        return 0;
@@@ -1744,16 -1741,13 +1745,16 @@@ static int ena_create_all_io_rx_queues(
                rc = ena_create_io_rx_queue(adapter, i);
                if (rc)
                        goto create_err;
 +              INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
        }
  
        return 0;
  
  create_err:
 -      while (i--)
 +      while (i--) {
 +              cancel_work_sync(&adapter->ena_napi[i].dim.work);
                ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
 +      }
  
        return rc;
  }
@@@ -2426,9 -2420,6 +2427,9 @@@ static void ena_config_host_info(struc
                ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
        host_info->num_cpus = num_online_cpus();
  
 +      host_info->driver_supported_features =
 +              ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
 +
        rc = ena_com_set_host_attributes(ena_dev);
        if (rc) {
                if (rc == -EOPNOTSUPP)
@@@ -3495,12 -3486,10 +3496,12 @@@ static int ena_probe(struct pci_dev *pd
        calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
        calc_queue_ctx.pdev = pdev;
  
 -      /* initial Tx interrupt delay, Assumes 1 usec granularity.
 +      /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
        * Updated during device initialization with the real granularity
        */
        ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
 +      ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
 +      ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
        io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
        rc = ena_calc_queue_size(&calc_queue_ctx);
        if (rc || io_queue_num <= 0) {
@@@ -3630,6 -3619,7 +3631,6 @@@ err_free_msix
        ena_free_mgmnt_irq(adapter);
        ena_disable_msix(adapter);
  err_worker_destroy:
 -      ena_com_destroy_interrupt_moderation(ena_dev);
        del_timer(&adapter->timer_service);
  err_netdev_destroy:
        free_netdev(netdev);
@@@ -3690,6 -3680,8 +3691,6 @@@ static void ena_remove(struct pci_dev *
  
        pci_disable_device(pdev);
  
 -      ena_com_destroy_interrupt_moderation(ena_dev);
 -
        vfree(ena_dev);
  }
  
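
The ENA hunks above drop the driver's private adaptive-moderation table in favor of the kernel's generic DIM (dynamically-tuned interrupt moderation) library. A minimal sketch of that consumption pattern follows; my_ring and its counters are hypothetical stand-ins, and only the <linux/dim.h> calls mirror what the patch actually uses.

#include <linux/dim.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_ring {
        struct dim dim;     /* DIM state machine; owns a work_struct */
        u16 rx_usecs;       /* interval to program on the next unmask */
        u64 packets, bytes; /* cumulative RX counters */
        u16 events;         /* non-empty NAPI polls observed */
};

/* Scheduled by net_dim() whenever it decides to move to a new profile. */
static void my_dim_work(struct work_struct *work)
{
        struct dim *dim = container_of(work, struct dim, work);
        struct my_ring *ring = container_of(dim, struct my_ring, dim);
        struct dim_cq_moder moder =
                net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

        ring->rx_usecs = moder.usec;    /* applied at interrupt unmask */
        dim->state = DIM_START_MEASURE; /* re-arm the measurement cycle */
}

/* Feed DIM one sample at the end of every NAPI poll that saw traffic. */
static void my_dim_update(struct my_ring *ring)
{
        struct dim_sample sample;

        dim_update_sample(++ring->events, ring->packets, ring->bytes,
                          &sample);
        net_dim(&ring->dim, sample);
}

The work item is wired up with INIT_WORK(&ring->dim.work, my_dim_work) when a queue is created and torn down with cancel_work_sync(), the same pairing that ena_create_all_io_rx_queues() and ena_destroy_all_rx_queues() gain in the hunks above.
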
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c61d702fe83a176e2f84eb43f6a842ee1c2cb96e,b19ab09cb18f7f78e32db20ec762befa94b4eea9..a6cb2aa60e6485d4a0126d3330ac0c348a9c2baa
@@@ -105,7 -105,7 +105,7 @@@ MODULE_PARM_DESC(chain_mode, "To use ch
  static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
  
  #ifdef CONFIG_DEBUG_FS
 -static int stmmac_init_fs(struct net_device *dev);
 +static void stmmac_init_fs(struct net_device *dev);
  static void stmmac_exit_fs(struct net_device *dev);
  #endif
  
@@@ -432,7 -432,6 +432,7 @@@ static void stmmac_get_tx_hwtstamp(stru
                                   struct dma_desc *p, struct sk_buff *skb)
  {
        struct skb_shared_hwtstamps shhwtstamp;
 +      bool found = false;
        u64 ns = 0;
  
        if (!priv->hwts_tx_en)
  
        /* check tx tstamp status */
        if (stmmac_get_tx_timestamp_status(priv, p)) {
 -              /* get the valid tstamp */
                stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
 +              found = true;
 +      } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
 +              found = true;
 +      }
  
 +      if (found) {
                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);
  
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
 -
 -      return;
  }
  
  /* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@@ -831,22 -828,15 +831,22 @@@ static void stmmac_validate(struct phyl
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseX_Full);
        } else if (priv->plat->has_xgmac) {
 -              phylink_set(mac_supported, 2500baseT_Full);
 -              phylink_set(mac_supported, 5000baseT_Full);
 -              phylink_set(mac_supported, 10000baseSR_Full);
 -              phylink_set(mac_supported, 10000baseLR_Full);
 -              phylink_set(mac_supported, 10000baseER_Full);
 -              phylink_set(mac_supported, 10000baseLRM_Full);
 -              phylink_set(mac_supported, 10000baseT_Full);
 -              phylink_set(mac_supported, 10000baseKX4_Full);
 -              phylink_set(mac_supported, 10000baseKR_Full);
 +              if (!max_speed || (max_speed >= 2500)) {
 +                      phylink_set(mac_supported, 2500baseT_Full);
 +                      phylink_set(mac_supported, 2500baseX_Full);
 +              }
 +              if (!max_speed || (max_speed >= 5000)) {
 +                      phylink_set(mac_supported, 5000baseT_Full);
 +              }
 +              if (!max_speed || (max_speed >= 10000)) {
 +                      phylink_set(mac_supported, 10000baseSR_Full);
 +                      phylink_set(mac_supported, 10000baseLR_Full);
 +                      phylink_set(mac_supported, 10000baseER_Full);
 +                      phylink_set(mac_supported, 10000baseLRM_Full);
 +                      phylink_set(mac_supported, 10000baseT_Full);
 +                      phylink_set(mac_supported, 10000baseKX4_Full);
 +                      phylink_set(mac_supported, 10000baseKR_Full);
 +              }
        }
  
        /* Half-Duplex can only work with single queue */
@@@ -1036,7 -1026,7 +1036,7 @@@ static int stmmac_init_phy(struct net_d
  static int stmmac_phy_setup(struct stmmac_priv *priv)
  {
        struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
 -      int mode = priv->plat->interface;
 +      int mode = priv->plat->phy_interface;
        struct phylink *phylink;
  
        priv->phylink_config.dev = &priv->dev->dev;
@@@ -1208,17 -1198,6 +1208,17 @@@ static int stmmac_init_rx_buffers(struc
        if (!buf->page)
                return -ENOMEM;
  
 +      if (priv->sph) {
 +              buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
 +              if (!buf->sec_page)
 +                      return -ENOMEM;
 +
 +              buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
 +              stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
 +      } else {
 +              buf->sec_page = NULL;
 +      }
 +
        buf->addr = page_pool_get_dma_addr(buf->page);
        stmmac_set_desc_addr(priv, p, buf->addr);
        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
@@@ -1241,10 -1220,6 +1241,10 @@@ static void stmmac_free_rx_buffer(struc
        if (buf->page)
                page_pool_put_page(rx_q->page_pool, buf->page, false);
        buf->page = NULL;
 +
 +      if (buf->sec_page)
 +              page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
 +      buf->sec_page = NULL;
  }
  
  /**
@@@ -2442,22 -2417,6 +2442,22 @@@ static void stmmac_mac_config_rx_queues
        }
  }
  
 +static void stmmac_mac_config_rss(struct stmmac_priv *priv)
 +{
 +      if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
 +              priv->rss.enable = false;
 +              return;
 +      }
 +
 +      if (priv->dev->features & NETIF_F_RXHASH)
 +              priv->rss.enable = true;
 +      else
 +              priv->rss.enable = false;
 +
 +      stmmac_rss_configure(priv, priv->hw, &priv->rss,
 +                           priv->plat->rx_queues_to_use);
 +}
 +
  /**
   *  stmmac_mtl_configuration - Configure MTL
   *  @priv: driver private structure
@@@ -2502,10 -2461,6 +2502,10 @@@ static void stmmac_mtl_configuration(st
        /* Set RX routing */
        if (rx_queues_count > 1)
                stmmac_mac_config_rx_queues_routing(priv);
 +
 +      /* Receive Side Scaling */
 +      if (rx_queues_count > 1)
 +              stmmac_mac_config_rss(priv);
  }
  
  static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
@@@ -2618,16 -2573,6 +2618,16 @@@ static int stmmac_hw_setup(struct net_d
                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
        }
  
 +      /* Enable Split Header */
 +      if (priv->sph && priv->hw->rx_csum) {
 +              for (chan = 0; chan < rx_cnt; chan++)
 +                      stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
 +      }
 +
 +      /* VLAN Tag Insertion */
 +      if (priv->dma_cap.vlins)
 +              stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
 +
        /* Start the ball rolling... */
        stmmac_start_all_dma(priv);
  
@@@ -2805,33 -2750,6 +2805,33 @@@ static int stmmac_release(struct net_de
        return 0;
  }
  
 +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
 +                             struct stmmac_tx_queue *tx_q)
 +{
 +      u16 tag = 0x0, inner_tag = 0x0;
 +      u32 inner_type = 0x0;
 +      struct dma_desc *p;
 +
 +      if (!priv->dma_cap.vlins)
 +              return false;
 +      if (!skb_vlan_tag_present(skb))
 +              return false;
 +      if (skb->vlan_proto == htons(ETH_P_8021AD)) {
 +              inner_tag = skb_vlan_tag_get(skb);
 +              inner_type = STMMAC_VLAN_INSERT;
 +      }
 +
 +      tag = skb_vlan_tag_get(skb);
 +
 +      p = tx_q->dma_tx + tx_q->cur_tx;
 +      if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
 +              return false;
 +
 +      stmmac_set_tx_owner(priv, p);
 +      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 +      return true;
 +}
 +
  /**
   *  stmmac_tso_allocator - close entry point of the driver
   *  @priv: driver private structure
@@@ -2911,13 -2829,12 +2911,13 @@@ static netdev_tx_t stmmac_tso_xmit(stru
        struct stmmac_priv *priv = netdev_priv(dev);
        int nfrags = skb_shinfo(skb)->nr_frags;
        u32 queue = skb_get_queue_mapping(skb);
 -      unsigned int first_entry;
        struct stmmac_tx_queue *tx_q;
 +      unsigned int first_entry;
        int tmp_pay_len = 0;
        u32 pay_len, mss;
        u8 proto_hdr_len;
        dma_addr_t des;
 +      bool has_vlan;
        int i;
  
        tx_q = &priv->tx_queue[queue];
                        skb->data_len);
        }
  
 +      /* Check if VLAN can be inserted by HW */
 +      has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
 +
        first_entry = tx_q->cur_tx;
        WARN_ON(tx_q->tx_skbuff[first_entry]);
  
        desc = tx_q->dma_tx + first_entry;
        first = desc;
  
 +      if (has_vlan)
 +              stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
 +
        /* first descriptor: fill Headers on Buf1 */
        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
                             DMA_TO_DEVICE);
                priv->xstats.tx_set_ic_bit++;
        }
  
 +      if (priv->sarc_type)
 +              stmmac_set_desc_sarc(priv, first, priv->sarc_type);
 +
        skb_tx_timestamp(skb);
  
        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
@@@ -3130,7 -3038,6 +3130,7 @@@ static netdev_tx_t stmmac_xmit(struct s
        unsigned int first_entry;
        unsigned int enh_desc;
        dma_addr_t des;
 +      bool has_vlan;
        int entry;
  
        tx_q = &priv->tx_queue[queue];
                return NETDEV_TX_BUSY;
        }
  
 +      /* Check if VLAN can be inserted by HW */
 +      has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
 +
        entry = tx_q->cur_tx;
        first_entry = entry;
        WARN_ON(tx_q->tx_skbuff[first_entry]);
  
        first = desc;
  
 +      if (has_vlan)
 +              stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
 +
        enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
        if (enh_desc)
                priv->xstats.tx_set_ic_bit++;
        }
  
 +      if (priv->sarc_type)
 +              stmmac_set_desc_sarc(priv, first, priv->sarc_type);
 +
        skb_tx_timestamp(skb);
  
        /* Ready to fill the first descriptor and set the OWN bit w/o any
@@@ -3394,17 -3292,6 +3394,17 @@@ static inline void stmmac_rx_refill(str
                                break;
                }
  
 +              if (priv->sph && !buf->sec_page) {
 +                      buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
 +                      if (!buf->sec_page)
 +                              break;
 +
 +                      buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
 +
 +                      dma_sync_single_for_device(priv->device, buf->sec_addr,
 +                                                 len, DMA_FROM_DEVICE);
 +              }
 +
                buf->addr = page_pool_get_dma_addr(buf->page);
  
                /* Sync whole allocation to device. This will invalidate old
                                           DMA_FROM_DEVICE);
  
                stmmac_set_desc_addr(priv, p, buf->addr);
 +              stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
                stmmac_refill_desc3(priv, rx_q, p);
  
                rx_q->rx_count_frames++;
 -              rx_q->rx_count_frames %= priv->rx_coal_frames;
 +              rx_q->rx_count_frames += priv->rx_coal_frames;
 +              if (rx_q->rx_count_frames > priv->rx_coal_frames)
 +                      rx_q->rx_count_frames = 0;
                use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
  
                dma_wmb();
@@@ -3446,10 -3330,9 +3446,10 @@@ static int stmmac_rx(struct stmmac_pri
  {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
 +      unsigned int count = 0, error = 0, len = 0;
 +      int status = 0, coe = priv->hw->rx_csum;
        unsigned int next_entry = rx_q->cur_rx;
 -      int coe = priv->hw->rx_csum;
 -      unsigned int count = 0;
 +      struct sk_buff *skb = NULL;
  
        if (netif_msg_rx_status(priv)) {
                void *rx_head;
                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
 +              unsigned int hlen = 0, prev_len = 0;
 +              enum pkt_hash_types hash_type;
                struct stmmac_rx_buffer *buf;
                struct dma_desc *np, *p;
 -              int entry, status;
 +              unsigned int sec_len;
 +              int entry;
 +              u32 hash;
 +
 +              if (!count && rx_q->state_saved) {
 +                      skb = rx_q->state.skb;
 +                      error = rx_q->state.error;
 +                      len = rx_q->state.len;
 +              } else {
 +                      rx_q->state_saved = false;
 +                      skb = NULL;
 +                      error = 0;
 +                      len = 0;
 +              }
  
 +              if (count >= limit)
 +                      break;
 +
 +read_again:
 +              sec_len = 0;
                entry = next_entry;
                buf = &rx_q->buf_pool[entry];
  
                        np = rx_q->dma_rx + next_entry;
  
                prefetch(np);
 +              prefetch(page_address(buf->page));
  
                if (priv->extend_desc)
                        stmmac_rx_extended_status(priv, &priv->dev->stats,
                                        &priv->xstats, rx_q->dma_erx + entry);
                if (unlikely(status == discard_frame)) {
                        page_pool_recycle_direct(rx_q->page_pool, buf->page);
 -                      priv->dev->stats.rx_errors++;
                        buf->page = NULL;
 -              } else {
 -                      struct sk_buff *skb;
 -                      int frame_len;
 -                      unsigned int des;
 +                      error = 1;
 +                      if (!priv->hwts_rx_en)
 +                              priv->dev->stats.rx_errors++;
 +              }
  
 -                      stmmac_get_desc_addr(priv, p, &des);
 -                      frame_len = stmmac_get_rx_frame_len(priv, p, coe);
 +              if (unlikely(error && (status & rx_not_ls)))
 +                      goto read_again;
 +              if (unlikely(error)) {
 +                      dev_kfree_skb(skb);
 +                      continue;
 +              }
  
 -                      /*  If frame length is greater than skb buffer size
 -                       *  (preallocated during init) then the packet is
 -                       *  ignored
 -                       */
 -                      if (frame_len > priv->dma_buf_sz) {
 -                              if (net_ratelimit())
 -                                      netdev_err(priv->dev,
 -                                                 "len %d larger than size (%d)\n",
 -                                                 frame_len, priv->dma_buf_sz);
 -                              priv->dev->stats.rx_length_errors++;
 -                              continue;
 -                      }
 +              /* Buffer is good. Go on. */
 +
 +              if (likely(status & rx_not_ls)) {
 +                      len += priv->dma_buf_sz;
 +              } else {
 +                      prev_len = len;
 +                      len = stmmac_get_rx_frame_len(priv, p, coe);
  
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
                         * Type frames (LLC/LLC-SNAP)
                         */
                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
                            unlikely(status != llc_snap))
 -                              frame_len -= ETH_FCS_LEN;
 +                              len -= ETH_FCS_LEN;
 +              }
 +
 +              if (!skb) {
 +                      int ret = stmmac_get_rx_header_len(priv, p, &hlen);
  
 -                      if (netif_msg_rx_status(priv)) {
 -                              netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
 -                                         p, entry, des);
 -                              netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
 -                                         frame_len, status);
 +                      if (priv->sph && !ret && (hlen > 0)) {
 +                              sec_len = len;
 +                              if (!(status & rx_not_ls))
 +                                      sec_len = sec_len - hlen;
 +                              len = hlen;
 +
 +                              prefetch(page_address(buf->sec_page));
 +                              priv->xstats.rx_split_hdr_pkt_n++;
                        }
  
 -                      skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
 -                      if (unlikely(!skb)) {
 +                      skb = napi_alloc_skb(&ch->rx_napi, len);
 +                      if (!skb) {
                                priv->dev->stats.rx_dropped++;
                                continue;
                        }
  
 -                      dma_sync_single_for_cpu(priv->device, buf->addr,
 -                                              frame_len, DMA_FROM_DEVICE);
 +                      dma_sync_single_for_cpu(priv->device, buf->addr, len,
 +                                              DMA_FROM_DEVICE);
                        skb_copy_to_linear_data(skb, page_address(buf->page),
 -                                              frame_len);
 -                      skb_put(skb, frame_len);
 +                                              len);
 +                      skb_put(skb, len);
  
 -                      if (netif_msg_pktdata(priv)) {
 -                              netdev_dbg(priv->dev, "frame received (%dbytes)",
 -                                         frame_len);
 -                              print_pkt(skb->data, frame_len);
 -                      }
 -
 -                      stmmac_get_rx_hwtstamp(priv, p, np, skb);
 +                      /* Data payload copied into SKB, page ready for recycle */
 +                      page_pool_recycle_direct(rx_q->page_pool, buf->page);
 +                      buf->page = NULL;
 +              } else {
 +                      unsigned int buf_len = len - prev_len;
  
 -                      stmmac_rx_vlan(priv->dev, skb);
 +                      if (likely(status & rx_not_ls))
 +                              buf_len = priv->dma_buf_sz;
  
 -                      skb->protocol = eth_type_trans(skb, priv->dev);
 +                      dma_sync_single_for_cpu(priv->device, buf->addr,
 +                                              buf_len, DMA_FROM_DEVICE);
 +                      skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 +                                      buf->page, 0, buf_len,
 +                                      priv->dma_buf_sz);
  
 -                      if (unlikely(!coe))
 -                              skb_checksum_none_assert(skb);
 -                      else
 -                              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                      /* Data payload appended into SKB */
 +                      page_pool_release_page(rx_q->page_pool, buf->page);
 +                      buf->page = NULL;
 +              }
  
 -                      napi_gro_receive(&ch->rx_napi, skb);
 +              if (sec_len > 0) {
 +                      dma_sync_single_for_cpu(priv->device, buf->sec_addr,
 +                                              sec_len, DMA_FROM_DEVICE);
 +                      skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 +                                      buf->sec_page, 0, sec_len,
 +                                      priv->dma_buf_sz);
  
 -                      /* Data payload copied into SKB, page ready for recycle */
 -                      page_pool_recycle_direct(rx_q->page_pool, buf->page);
 -                      buf->page = NULL;
 +                      len += sec_len;
  
 -                      priv->dev->stats.rx_packets++;
 -                      priv->dev->stats.rx_bytes += frame_len;
 +                      /* Data payload appended into SKB */
 +                      page_pool_release_page(rx_q->page_pool, buf->sec_page);
 +                      buf->sec_page = NULL;
                }
 +
 +              if (likely(status & rx_not_ls))
 +                      goto read_again;
 +
 +              /* Got entire packet into SKB. Finish it. */
 +
 +              stmmac_get_rx_hwtstamp(priv, p, np, skb);
 +              stmmac_rx_vlan(priv->dev, skb);
 +              skb->protocol = eth_type_trans(skb, priv->dev);
 +
 +              if (unlikely(!coe))
 +                      skb_checksum_none_assert(skb);
 +              else
 +                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +              if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
 +                      skb_set_hash(skb, hash, hash_type);
 +
 +              skb_record_rx_queue(skb, queue);
 +              napi_gro_receive(&ch->rx_napi, skb);
 +
 +              priv->dev->stats.rx_packets++;
 +              priv->dev->stats.rx_bytes += len;
 +      }
 +
 +      if (status & rx_not_ls) {
 +              rx_q->state_saved = true;
 +              rx_q->state.skb = skb;
 +              rx_q->state.error = error;
 +              rx_q->state.len = len;
        }
  
        stmmac_rx_refill(priv, queue);
@@@ -3786,8 -3606,6 +3786,8 @@@ static int stmmac_set_features(struct n
                               netdev_features_t features)
  {
        struct stmmac_priv *priv = netdev_priv(netdev);
 +      bool sph_en;
 +      u32 chan;
  
        /* Keep the COE Type in case of csum is supporting */
        if (features & NETIF_F_RXCSUM)
         */
        stmmac_rx_ipc(priv, priv->hw);
  
 +      sph_en = (priv->hw->rx_csum > 0) && priv->sph;
 +      for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
 +              stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
 +
        return 0;
  }
  
@@@ -3941,17 -3755,12 +3941,17 @@@ static int stmmac_setup_tc_block_cb(enu
        struct stmmac_priv *priv = cb_priv;
        int ret = -EOPNOTSUPP;
  
 +      if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
 +              return ret;
 +
        stmmac_disable_all_queues(priv);
  
        switch (type) {
        case TC_SETUP_CLSU32:
 -              if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
 -                      ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
 +              ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
 +              break;
 +      case TC_SETUP_CLSFLOWER:
 +              ret = stmmac_tc_setup_cls(priv, priv, type_data);
                break;
        default:
                break;
@@@ -4153,102 -3962,54 +4153,102 @@@ static int stmmac_dma_cap_show(struct s
  }
  DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
  
 -static int stmmac_init_fs(struct net_device *dev)
 +static void stmmac_init_fs(struct net_device *dev)
  {
        struct stmmac_priv *priv = netdev_priv(dev);
  
        /* Create per netdev entries */
        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
  
 -      if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
 -              netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
 +      /* Entry to report DMA RX/TX rings */
 +      debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
 +                          &stmmac_rings_status_fops);
  
 -              return -ENOMEM;
 +      /* Entry to report the DMA HW features */
 +      debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
 +                          &stmmac_dma_cap_fops);
 +}
 +
 +static void stmmac_exit_fs(struct net_device *dev)
 +{
 +      struct stmmac_priv *priv = netdev_priv(dev);
 +
 +      debugfs_remove_recursive(priv->dbgfs_dir);
 +}
 +#endif /* CONFIG_DEBUG_FS */
 +
 +static u32 stmmac_vid_crc32_le(__le16 vid_le)
 +{
 +      unsigned char *data = (unsigned char *)&vid_le;
 +      unsigned char data_byte = 0;
 +      u32 crc = ~0x0;
 +      u32 temp = 0;
 +      int i, bits;
 +
 +      bits = get_bitmask_order(VLAN_VID_MASK);
 +      for (i = 0; i < bits; i++) {
 +              if ((i % 8) == 0)
 +                      data_byte = data[i / 8];
 +
 +              temp = ((crc & 1) ^ data_byte) & 1;
 +              crc >>= 1;
 +              data_byte >>= 1;
 +
 +              if (temp)
 +                      crc ^= 0xedb88320;
        }
  
 -      /* Entry to report DMA RX/TX rings */
 -      priv->dbgfs_rings_status =
 -              debugfs_create_file("descriptors_status", 0444,
 -                                  priv->dbgfs_dir, dev,
 -                                  &stmmac_rings_status_fops);
 +      return crc;
 +}
  
 -      if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
 -              netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
 -              debugfs_remove_recursive(priv->dbgfs_dir);
 +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
 +{
 +      u32 crc, hash = 0;
 +      u16 vid;
  
 -              return -ENOMEM;
 +      for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
 +              __le16 vid_le = cpu_to_le16(vid);
 +              crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
 +              hash |= (1 << crc);
        }
  
 -      /* Entry to report the DMA HW features */
 -      priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
 -                                                priv->dbgfs_dir,
 -                                                dev, &stmmac_dma_cap_fops);
 +      return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
 +}
  
 -      if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
 -              netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
 -              debugfs_remove_recursive(priv->dbgfs_dir);
 +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
 +{
 +      struct stmmac_priv *priv = netdev_priv(ndev);
 +      bool is_double = false;
 +      int ret;
  
 -              return -ENOMEM;
 +      if (!priv->dma_cap.vlhash)
 +              return -EOPNOTSUPP;
 +      if (be16_to_cpu(proto) == ETH_P_8021AD)
 +              is_double = true;
 +
 +      set_bit(vid, priv->active_vlans);
 +      ret = stmmac_vlan_update(priv, is_double);
 +      if (ret) {
 +              clear_bit(vid, priv->active_vlans);
 +              return ret;
        }
  
 -      return 0;
 +      return ret;
  }
  
 -static void stmmac_exit_fs(struct net_device *dev)
 +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
  {
 -      struct stmmac_priv *priv = netdev_priv(dev);
 +      struct stmmac_priv *priv = netdev_priv(ndev);
 +      bool is_double = false;
  
 -      debugfs_remove_recursive(priv->dbgfs_dir);
 +      if (!priv->dma_cap.vlhash)
 +              return -EOPNOTSUPP;
 +      if (be16_to_cpu(proto) == ETH_P_8021AD)
 +              is_double = true;
 +
 +      clear_bit(vid, priv->active_vlans);
 +      return stmmac_vlan_update(priv, is_double);
  }
 -#endif /* CONFIG_DEBUG_FS */
  
  static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_open = stmmac_open,
        .ndo_poll_controller = stmmac_poll_controller,
  #endif
        .ndo_set_mac_address = stmmac_set_mac_address,
 +      .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
 +      .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
  };
  
  static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@@ -4416,8 -4175,8 +4416,8 @@@ int stmmac_dvr_probe(struct device *dev
  {
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
 -      u32 queue, maxq;
 -      int ret = 0;
 +      u32 queue, rxq, maxq;
 +      int i, ret = 0;
  
        ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
                                       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
                dev_info(priv->device, "TSO feature enabled\n");
        }
  
 +      if (priv->dma_cap.sphen) {
 +              ndev->hw_features |= NETIF_F_GRO;
 +              priv->sph = true;
 +              dev_info(priv->device, "SPH feature enabled\n");
 +      }
 +
        if (priv->dma_cap.addr64) {
                ret = dma_set_mask_and_coherent(device,
                                DMA_BIT_MASK(priv->dma_cap.addr64));
  #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
 +      if (priv->dma_cap.vlhash) {
 +              ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 +              ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
 +      }
 +      if (priv->dma_cap.vlins) {
 +              ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
 +              if (priv->dma_cap.dvlan)
 +                      ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
 +      }
  #endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);
  
 +      /* Initialize RSS */
 +      rxq = priv->plat->rx_queues_to_use;
 +      netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
 +      for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
 +              priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
 +
 +      if (priv->dma_cap.rssen && priv->plat->rss_en)
 +              ndev->features |= NETIF_F_RXHASH;
 +
        /* MTU range: 46 - hw-specific max */
        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
 -      if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
 -              ndev->max_mtu = JUMBO_LEN;
 -      else if (priv->plat->has_xgmac)
 +      if (priv->plat->has_xgmac)
                ndev->max_mtu = XGMAC_JUMBO_LEN;
 +      else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
 +              ndev->max_mtu = JUMBO_LEN;
        else
                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
        /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
        }
  
  #ifdef CONFIG_DEBUG_FS
 -      ret = stmmac_init_fs(ndev);
 -      if (ret < 0)
 -              netdev_warn(priv->dev, "%s: failed debugFS registration\n",
 -                          __func__);
 +      stmmac_init_fs(ndev);
  #endif
  
        return ret;
@@@ -4713,10 -4451,12 +4713,12 @@@ int stmmac_suspend(struct device *dev
        if (!ndev || !netif_running(ndev))
                return 0;
  
-       phylink_stop(priv->phylink);
        mutex_lock(&priv->lock);
  
+       rtnl_lock();
+       phylink_stop(priv->phylink);
+       rtnl_unlock();
        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);
  
@@@ -4820,9 -4560,11 +4822,11 @@@ int stmmac_resume(struct device *dev
  
        stmmac_start_all_queues(priv);
  
-       mutex_unlock(&priv->lock);
+       rtnl_lock();
        phylink_start(priv->phylink);
+       rtnl_unlock();
+       mutex_unlock(&priv->lock);
  
        return 0;
  }
@@@ -4879,8 -4621,16 +4883,8 @@@ static int __init stmmac_init(void
  {
  #ifdef CONFIG_DEBUG_FS
        /* Create debugfs main directory if it doesn't exist yet */
 -      if (!stmmac_fs_dir) {
 +      if (!stmmac_fs_dir)
                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
 -
 -              if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
 -                      pr_err("ERROR %s, debugfs create directory failed\n",
 -                             STMMAC_RESOURCE_NAME);
 -
 -                      return -ENOMEM;
 -              }
 -      }
  #endif
  
        return 0;
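
Among the stmmac changes above, the reworked stmmac_rx() can now assemble one frame from several descriptors and even across NAPI budgets, parking a half-built skb in rx_q->state when the budget runs out and resuming it on the next poll. Distilled to its skeleton (my_rxq is a hypothetical stand-in and the descriptor handling is elided), the save/resume pattern looks like:

#include <linux/skbuff.h>

struct my_rxq {
        bool state_saved;
        struct {
                struct sk_buff *skb; /* partially assembled frame */
                unsigned int error;
                unsigned int len;
        } state;
};

static int my_rx_poll(struct my_rxq *rxq, int limit)
{
        unsigned int error = 0, len = 0;
        struct sk_buff *skb = NULL;
        int count = 0;

        /* Resume the frame that ran out of budget on the previous poll. */
        if (rxq->state_saved) {
                rxq->state_saved = false;
                skb = rxq->state.skb;
                error = rxq->state.error;
                len = rxq->state.len;
        }

        while (count < limit) {
                /* ...read one descriptor and append its buffer to skb;
                 * when the last-segment bit is set, hand skb to the
                 * stack and set skb = NULL... */
                count++;
        }

        /* Budget exhausted mid-frame: park the partial skb for next time. */
        if (skb) {
                rxq->state_saved = true;
                rxq->state.skb = skb;
                rxq->state.error = error;
                rxq->state.len = len;
        }
        return count;
}

This is what lets the driver add split-header and multi-buffer receive without dropping a frame at a budget boundary.
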
diff --combined drivers/net/xen-netfront.c
index b930d5f9522234c98116f2ea20b2c366f754a813,5f5722bf6762dcc3e63d3b3ef82126025894f31b..e14ec75b61d60776008b05d1bbd0b0a0d3055bbb
@@@ -531,7 -531,7 +531,7 @@@ static int xennet_count_skb_slots(struc
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
 -              unsigned long offset = frag->page_offset;
 +              unsigned long offset = skb_frag_off(frag);
  
                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
@@@ -674,8 -674,8 +674,8 @@@ static netdev_tx_t xennet_start_xmit(st
        /* Requests for all the frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 -              tx = xennet_make_txreqs(queue, tx, skb,
 -                                      skb_frag_page(frag), frag->page_offset,
 +              tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
 +                                      skb_frag_off(frag),
                                        skb_frag_size(frag));
        }
  
@@@ -906,7 -906,7 +906,7 @@@ static RING_IDX xennet_fill_frags(struc
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons;
+                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
                        return ~0U;
                }
@@@ -1040,7 -1040,7 +1040,7 @@@ err
                if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
                        NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
  
 -              skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 +              skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                skb->data_len = rx->status;
                skb->len += rx->status;
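
The xen-netfront hunks above are part of the tree-wide 5.4 conversion from touching skb_frag_t members directly to going through accessors, so the fragment representation could change underneath (it became a bio_vec). A minimal before/after using only the accessors this merge relies on; frag_off_demo is a made-up illustration:

#include <linux/skbuff.h>

static void frag_off_demo(struct sk_buff *skb)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int off;

        /* Old style, now gone:  off = frag->page_offset; */
        off = skb_frag_off(frag);        /* read the fragment offset */
        skb_frag_off_set(frag, off + 2); /* absolute write */
        skb_frag_off_add(frag, -2);      /* relative adjust, back to off */
}

skb_frag_off_add() is the variant the gro_pull_from_frag0() hunk in net/core/dev.c below uses.
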
diff --combined include/net/pkt_sched.h
index d1632979622e746abe163407bb0388e5ad278fbb,aa99c73c3fbd3353697970d9d6edb9c6871b4a9b..6a70845bd9ab00e3f3607fcf102d93c948ea6884
@@@ -118,7 -118,12 +118,12 @@@ void __qdisc_run(struct Qdisc *q)
  static inline void qdisc_run(struct Qdisc *q)
  {
        if (qdisc_run_begin(q)) {
-               __qdisc_run(q);
+               /* NOLOCK qdisc must check 'state' under the qdisc seqlock
+                * to avoid racing with dev_qdisc_reset()
+                */
+               if (!(q->flags & TCQ_F_NOLOCK) ||
+                   likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+                       __qdisc_run(q);
                qdisc_run_end(q);
        }
  }
@@@ -161,27 -166,4 +166,27 @@@ struct tc_etf_qopt_offload 
        s32 queue;
  };
  
 +struct tc_taprio_sched_entry {
 +      u8 command; /* TC_TAPRIO_CMD_* */
 +
 +      /* The gate_mask in the offloading side refers to traffic classes */
 +      u32 gate_mask;
 +      u32 interval;
 +};
 +
 +struct tc_taprio_qopt_offload {
 +      u8 enable;
 +      ktime_t base_time;
 +      u64 cycle_time;
 +      u64 cycle_time_extension;
 +
 +      size_t num_entries;
 +      struct tc_taprio_sched_entry entries[0];
 +};
 +
 +/* Reference counting */
 +struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
 +                                                *offload);
 +void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
 +
  #endif
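
The new tc_taprio_qopt_offload above hands the taprio schedule to drivers as a fixed header plus a flexible array of entries, with taprio_offload_get()/taprio_offload_free() providing the reference counting the comment mentions. A sketch of how an offloading driver might consume it; my_program_schedule and the loop body are illustrative, not taken from a real driver:

#include <net/pkt_sched.h>

static void my_program_schedule(struct tc_taprio_qopt_offload *offload)
{
        size_t i;

        /* Hold a reference while hardware programming is in flight. */
        taprio_offload_get(offload);

        for (i = 0; i < offload->num_entries; i++) {
                struct tc_taprio_sched_entry *e = &offload->entries[i];

                /* e->command selects the gate operation, e->gate_mask is
                 * a bitmap of traffic classes (not queues, per the comment
                 * above), and e->interval is the slot length. */
                (void)e;
        }

        taprio_offload_free(offload); /* drop the reference when done */
}
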
diff --combined net/core/dev.c
index a9775d676285379cb82f2b69fb3e3b96003ae64d,4ed9df74eb8aae87b516bb699f56ef05caa111a7..71b18e80389faecd5af1942a91dae3b86f4f816d
@@@ -3467,18 -3467,22 +3467,22 @@@ static inline int __dev_xmit_skb(struc
        qdisc_calculate_pkt_len(skb, q);
  
        if (q->flags & TCQ_F_NOLOCK) {
-               if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-                       __qdisc_drop(skb, &to_free);
-                       rc = NET_XMIT_DROP;
-               } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
-                          qdisc_run_begin(q)) {
+               if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+                   qdisc_run_begin(q)) {
+                       if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+                                             &q->state))) {
+                               __qdisc_drop(skb, &to_free);
+                               rc = NET_XMIT_DROP;
+                               goto end_run;
+                       }
                        qdisc_bstats_cpu_update(q, skb);
  
+                       rc = NET_XMIT_SUCCESS;
                        if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
                                __qdisc_run(q);
  
+ end_run:
                        qdisc_run_end(q);
-                       rc = NET_XMIT_SUCCESS;
                } else {
                        rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                        qdisc_run(q);
@@@ -3963,8 -3967,6 +3967,8 @@@ int dev_weight_rx_bias __read_mostly = 
  int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
  int dev_rx_weight __read_mostly = 64;
  int dev_tx_weight __read_mostly = 64;
 +/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
 +int gro_normal_batch __read_mostly = 8;
  
  /* Called with irq disabled */
  static inline void ____napi_schedule(struct softnet_data *sd,
@@@ -5488,7 -5490,7 +5492,7 @@@ static void gro_pull_from_frag0(struct 
        skb->data_len -= grow;
        skb->tail += grow;
  
 -      pinfo->frags[0].page_offset += grow;
 +      skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);
  
        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
@@@ -5749,26 -5751,6 +5753,26 @@@ struct sk_buff *napi_get_frags(struct n
  }
  EXPORT_SYMBOL(napi_get_frags);
  
 +/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
 +static void gro_normal_list(struct napi_struct *napi)
 +{
 +      if (!napi->rx_count)
 +              return;
 +      netif_receive_skb_list_internal(&napi->rx_list);
 +      INIT_LIST_HEAD(&napi->rx_list);
 +      napi->rx_count = 0;
 +}
 +
 +/* Queue one GRO_NORMAL SKB up for list processing.  If batch size exceeded,
 + * pass the whole batch up to the stack.
 + */
 +static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
 +{
 +      list_add_tail(&skb->list, &napi->rx_list);
 +      if (++napi->rx_count >= gro_normal_batch)
 +              gro_normal_list(napi);
 +}
 +
  static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
 -              if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
 -                      ret = GRO_DROP;
 +              if (ret == GRO_NORMAL)
 +                      gro_normal_one(napi, skb);
                break;
  
        case GRO_DROP:
@@@ -6056,8 -6038,6 +6060,8 @@@ bool napi_complete_done(struct napi_str
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
  
 +      gro_normal_list(n);
 +
        if (n->gro_bitmask) {
                unsigned long timeout = 0;
  
@@@ -6143,19 -6123,10 +6147,19 @@@ static void busy_poll_stop(struct napi_
         * Ideally, a new ndo_busy_poll_stop() could avoid another round.
         */
        rc = napi->poll(napi, BUSY_POLL_BUDGET);
 +      /* We can't gro_normal_list() here, because napi->poll() might have
 +       * rearmed the napi (napi_complete_done()) in which case it could
 +       * already be running on another CPU.
 +       */
        trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
        netpoll_poll_unlock(have_poll_lock);
 -      if (rc == BUSY_POLL_BUDGET)
 +      if (rc == BUSY_POLL_BUDGET) {
 +              /* As the whole budget was spent, we still own the napi so can
 +               * safely handle the rx_list.
 +               */
 +              gro_normal_list(napi);
                __napi_schedule(napi);
 +      }
        local_bh_enable();
  }
  
@@@ -6200,7 -6171,6 +6204,7 @@@ restart
                }
                work = napi_poll(napi, BUSY_POLL_BUDGET);
                trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 +              gro_normal_list(napi);
  count:
                if (work > 0)
                        __NET_ADD_STATS(dev_net(napi->dev),
@@@ -6306,8 -6276,6 +6310,8 @@@ void netif_napi_add(struct net_device *
        napi->timer.function = napi_watchdog;
        init_gro_hash(napi);
        napi->skb = NULL;
 +      INIT_LIST_HEAD(&napi->rx_list);
 +      napi->rx_count = 0;
        napi->poll = poll;
        if (weight > NAPI_POLL_WEIGHT)
                netdev_err_once(dev, "%s() called with weight %d\n", __func__,
@@@ -6404,8 -6372,6 +6408,8 @@@ static int napi_poll(struct napi_struc
                goto out_unlock;
        }
  
 +      gro_normal_list(n);
 +
        if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
@@@ -8126,15 -8092,12 +8130,15 @@@ int dev_change_xdp_fd(struct net_devic
                bpf_chk = generic_xdp_install;
  
        if (fd >= 0) {
 +              u32 prog_id;
 +
                if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
                        NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
                        return -EEXIST;
                }
 -              if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
 -                  __dev_xdp_query(dev, bpf_op, query)) {
 +
 +              prog_id = __dev_xdp_query(dev, bpf_op, query);
 +              if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
                        NL_SET_ERR_MSG(extack, "XDP program already attached");
                        return -EBUSY;
                }
                        bpf_prog_put(prog);
                        return -EINVAL;
                }
 +
 +              if (prog->aux->id == prog_id) {
 +                      bpf_prog_put(prog);
 +                      return 0;
 +              }
 +      } else {
 +              if (!__dev_xdp_query(dev, bpf_op, query))
 +                      return 0;
        }
  
        err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
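
The gro_normal_*() helpers added to net/core/dev.c above queue GRO_NORMAL skbs on a per-NAPI list and deliver them in batches of gro_normal_batch (default 8) through the listified receive path, rather than one netif_receive_skb_internal() call per skb. The idiom, stripped of the NAPI plumbing (my_ctx and the deliver callback are generic stand-ins, not the kernel's names):

#include <linux/list.h>
#include <linux/skbuff.h>

#define MY_BATCH 8 /* stands in for the gro_normal_batch knob */

struct my_ctx {
        struct list_head rx_list; /* skbs queued for delivery */
        int rx_count;             /* current batch depth */
};

/* Deliver everything queued so far in one list pass, then reset. */
static void my_flush(struct my_ctx *ctx,
                     void (*deliver)(struct list_head *head))
{
        if (!ctx->rx_count)
                return;
        deliver(&ctx->rx_list);
        INIT_LIST_HEAD(&ctx->rx_list);
        ctx->rx_count = 0;
}

/* Queue one skb; flush once the batch threshold is reached. */
static void my_queue_one(struct my_ctx *ctx, struct sk_buff *skb,
                         void (*deliver)(struct list_head *head))
{
        list_add_tail(&skb->list, &ctx->rx_list);
        if (++ctx->rx_count >= MY_BATCH)
                my_flush(ctx, deliver);
}

The flush sites in the hunks matter as much as the helpers: napi_complete_done() and napi_poll() flush while the owning CPU still holds the NAPI, and busy_poll_stop() flushes only when the whole budget was spent, to sidestep exactly the rearming race the added comment describes.
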
diff --combined net/dsa/dsa2.c
index b501c90aabe45e16741f17ff663917bc82464a2e,96f787cf9b6e1821dcf7c25df5de598b195fafae..73002022c9d820df95b9df1841ce3724c4d41070
@@@ -254,109 -254,88 +254,109 @@@ static void dsa_tree_teardown_default_c
  
  static int dsa_port_setup(struct dsa_port *dp)
  {
 -      enum devlink_port_flavour flavour;
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
 +      const unsigned char *id = (const unsigned char *)&dst->index;
 +      const unsigned char len = sizeof(dst->index);
 +      struct devlink_port *dlp = &dp->devlink_port;
 +      bool dsa_port_link_registered = false;
 +      bool devlink_port_registered = false;
 +      struct devlink *dl = ds->devlink;
 +      bool dsa_port_enabled = false;
        int err = 0;
  
 -      if (dp->type == DSA_PORT_TYPE_UNUSED)
 -              return 0;
 -
 -      memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
 -      dp->mac = of_get_mac_address(dp->dn);
 -
 -      switch (dp->type) {
 -      case DSA_PORT_TYPE_CPU:
 -              flavour = DEVLINK_PORT_FLAVOUR_CPU;
 -              break;
 -      case DSA_PORT_TYPE_DSA:
 -              flavour = DEVLINK_PORT_FLAVOUR_DSA;
 -              break;
 -      case DSA_PORT_TYPE_USER: /* fall-through */
 -      default:
 -              flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
 -              break;
 -      }
 -
 -      /* dp->index is used now as port_number. However
 -       * CPU and DSA ports should have separate numbering
 -       * independent from front panel port numbers.
 -       */
 -      devlink_port_attrs_set(&dp->devlink_port, flavour,
 -                             dp->index, false, 0,
 -                             (const char *) &dst->index, sizeof(dst->index));
 -      err = devlink_port_register(ds->devlink, &dp->devlink_port,
 -                                  dp->index);
 -      if (err)
 -              return err;
 -
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
 +              dsa_port_disable(dp);
                break;
        case DSA_PORT_TYPE_CPU:
 +              memset(dlp, 0, sizeof(*dlp));
 +              devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_CPU,
 +                                     dp->index, false, 0, id, len);
 +              err = devlink_port_register(dl, dlp, dp->index);
 +              if (err)
 +                      break;
 +              devlink_port_registered = true;
 +
                err = dsa_port_link_register_of(dp);
                if (err)
 -                      dev_err(ds->dev, "failed to setup link for port %d.%d\n",
 -                              ds->index, dp->index);
 +                      break;
 +              dsa_port_link_registered = true;
 +
 +              err = dsa_port_enable(dp, NULL);
 +              if (err)
 +                      break;
 +              dsa_port_enabled = true;
 +
                break;
        case DSA_PORT_TYPE_DSA:
 +              memset(dlp, 0, sizeof(*dlp));
 +              devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_DSA,
 +                                     dp->index, false, 0, id, len);
 +              err = devlink_port_register(dl, dlp, dp->index);
 +              if (err)
 +                      break;
 +              devlink_port_registered = true;
 +
                err = dsa_port_link_register_of(dp);
                if (err)
 -                      dev_err(ds->dev, "failed to setup link for port %d.%d\n",
 -                              ds->index, dp->index);
 +                      break;
 +              dsa_port_link_registered = true;
 +
 +              err = dsa_port_enable(dp, NULL);
 +              if (err)
 +                      break;
 +              dsa_port_enabled = true;
 +
                break;
        case DSA_PORT_TYPE_USER:
 +              memset(dlp, 0, sizeof(*dlp));
 +              devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL,
 +                                     dp->index, false, 0, id, len);
 +              err = devlink_port_register(dl, dlp, dp->index);
 +              if (err)
 +                      break;
 +              devlink_port_registered = true;
 +
 +              dp->mac = of_get_mac_address(dp->dn);
                err = dsa_slave_create(dp);
                if (err)
 -                      dev_err(ds->dev, "failed to create slave for port %d.%d\n",
 -                              ds->index, dp->index);
 -              else
 -                      devlink_port_type_eth_set(&dp->devlink_port, dp->slave);
 +                      break;
 +
 +              devlink_port_type_eth_set(dlp, dp->slave);
                break;
        }
  
 -      if (err)
 -              devlink_port_unregister(&dp->devlink_port);
 +      if (err && dsa_port_enabled)
 +              dsa_port_disable(dp);
 +      if (err && dsa_port_link_registered)
 +              dsa_port_link_unregister_of(dp);
 +      if (err && devlink_port_registered)
 +              devlink_port_unregister(dlp);
  
        return err;
  }
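
The three booleans record exactly how far setup got, so the shared error path
undoes only the steps that succeeded, in reverse order, instead of
unconditionally unregistering the devlink port. Per switch case, the
equivalent goto-based unwind would read (sketch):

            err = devlink_port_register(dl, dlp, dp->index);
            if (err)
                    return err;

            err = dsa_port_link_register_of(dp);
            if (err)
                    goto err_unregister_port;

            err = dsa_port_enable(dp, NULL);
            if (err)
                    goto err_unregister_link;

            return 0;

    err_unregister_link:
            dsa_port_link_unregister_of(dp);
    err_unregister_port:
            devlink_port_unregister(dlp);
            return err;

The bool-based form avoids duplicating these labels across the CPU, DSA and
user-port cases at the cost of three flag variables.
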
  
  static void dsa_port_teardown(struct dsa_port *dp)
  {
 -      if (dp->type != DSA_PORT_TYPE_UNUSED)
 -              devlink_port_unregister(&dp->devlink_port);
 +      struct devlink_port *dlp = &dp->devlink_port;
  
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                break;
        case DSA_PORT_TYPE_CPU:
 +              dsa_port_disable(dp);
                dsa_tag_driver_put(dp->tag_ops);
 -              /* fall-through */
 +              devlink_port_unregister(dlp);
 +              dsa_port_link_unregister_of(dp);
 +              break;
        case DSA_PORT_TYPE_DSA:
 +              dsa_port_disable(dp);
 +              devlink_port_unregister(dlp);
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
 +              devlink_port_unregister(dlp);
                if (dp->slave) {
                        dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
@@@ -644,6 -623,8 +644,8 @@@ static int dsa_port_parse_cpu(struct ds
        tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
        tag_ops = dsa_tag_driver_get(tag_protocol);
        if (IS_ERR(tag_ops)) {
+               if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+                       return -EPROBE_DEFER;
                dev_warn(ds->dev, "No tagger for this switch\n");
                return PTR_ERR(tag_ops);
        }
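
-ENOPROTOOPT is the error the tagger lookup hands back when no tag driver for
the requested protocol is registered, typically because its module has not
been loaded yet; mapping it to -EPROBE_DEFER lets the probe be retried once
the tagger appears instead of failing the switch outright. The assumed shape
of the lookup (helper name below is illustrative, not the real symbol):

    const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
    {
            const struct dsa_device_ops *ops;

            /* May trigger request_module() for the tagger. */
            ops = lookup_registered_tagger(tag_protocol); /* hypothetical */
            if (!ops)
                    return ERR_PTR(-ENOPROTOOPT); /* not loaded (yet) */

            return ops;
    }
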
@@@ -853,6 -834,20 +855,6 @@@ struct dsa_switch *dsa_switch_alloc(str
        if (!ds)
                return NULL;
  
 -      /* We avoid allocating memory outside dsa_switch
 -       * if it is not needed.
 -       */
 -      if (n <= sizeof(ds->_bitmap) * 8) {
 -              ds->bitmap = &ds->_bitmap;
 -      } else {
 -              ds->bitmap = devm_kcalloc(dev,
 -                                        BITS_TO_LONGS(n),
 -                                        sizeof(unsigned long),
 -                                        GFP_KERNEL);
 -              if (unlikely(!ds->bitmap))
 -                      return NULL;
 -      }
 -
        ds->dev = dev;
        ds->num_ports = n;
  
diff --combined net/ipv4/udp.c
index fbcd9be3a470f5f6d2597bb948788171b8b66199,16486c8b708b40f04863a443bde6c7c52e947d11..cf755156a684373f92c639c274f0fb4ab62aa211
@@@ -423,12 -423,13 +423,13 @@@ static struct sock *udp4_lib_lookup2(st
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        badness = score;
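
Both hunks protect connected SO_REUSEPORT UDP sockets: a connected socket
scores higher than an unconnected one, so the lookup must not let the
reuseport hash short-circuit past it. The first check skips selection when
the candidate itself is connected; the second keeps scoring when the group
contains any connected socket. The scenario, sketched from userspace (error
handling elided):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdint.h>
    #include <sys/socket.h>

    /* Two SO_REUSEPORT UDP sockets bound to the same port; one of them
     * is then connect()ed to a peer.  With the fix, datagrams from that
     * peer always reach the connected socket rather than whichever
     * socket the reuseport hash picks.
     */
    static int reuseport_udp_socket(uint16_t port)
    {
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port = htons(port),
                    .sin_addr.s_addr = htonl(INADDR_ANY),
            };
            int one = 1;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
            bind(fd, (struct sockaddr *)&addr, sizeof(addr));
            return fd;
    }

Creating two such sockets and connect()ing one of them reproduces the case
this change covers; the udp6_lib_lookup2() hunk below applies the same logic
to IPv6.
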
@@@ -1130,7 -1131,7 +1131,7 @@@ int udp_sendmsg(struct sock *sk, struc
  
                fl4 = &fl4_stack;
  
 -              flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 +              flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
                                   flow_flags,
                                   faddr, saddr, dport, inet->inet_sport,
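
Using ipc.sockc.mark instead of sk->sk_mark means a per-call SO_MARK control
message, parsed earlier in udp_sendmsg() into the sockc cookie, now actually
steers the route lookup. From userspace that looks roughly like this (sketch;
setting a mark requires CAP_NET_ADMIN):

    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Send one datagram on a connected UDP socket with a per-call fwmark. */
    static ssize_t send_with_mark(int fd, const void *buf, size_t len,
                                  uint32_t mark)
    {
            char cbuf[CMSG_SPACE(sizeof(mark))];
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = cbuf,
                    .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            cmsg->cmsg_level = SOL_SOCKET;
            cmsg->cmsg_type = SO_MARK;
            cmsg->cmsg_len = CMSG_LEN(sizeof(mark));
            memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));

            return sendmsg(fd, &msg, 0);
    }
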
diff --combined net/ipv6/udp.c
index 2c8beb3896d17ea8bd482d9106f8a8896cab2053,5995fdc99d3f3ef7b4f9cf59914976d248f28349..aae4938f3deab284ecfd78f0c601647fbb9b03d4
@@@ -158,13 -158,14 +158,14 @@@ static struct sock *udp6_lib_lookup2(st
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
  
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        result = sk;
@@@ -1230,7 -1231,6 +1231,7 @@@ int udpv6_sendmsg(struct sock *sk, stru
        ipcm6_init(&ipc6);
        ipc6.gso_size = up->gso_size;
        ipc6.sockc.tsflags = sk->sk_tsflags;
 +      ipc6.sockc.mark = sk->sk_mark;
  
        /* destination address check */
        if (sin6) {
@@@ -1353,7 -1353,7 +1354,7 @@@ do_udp_sendmsg
        if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
  
 -      fl6.flowi6_mark = sk->sk_mark;
 +      fl6.flowi6_mark = ipc6.sockc.mark;
        fl6.flowi6_uid = sk->sk_uid;
  
        if (msg->msg_controllen) {
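
The explicit "ipc6.sockc.mark = sk->sk_mark" above is needed because, unlike
the IPv4 path, ipcm6_init() zero-initialises the cookie rather than seeding
it from the socket; the mark then defaults to the socket mark and can still
be overridden by a SO_MARK cmsg before the flowi6 is built. Assumed shape of
the initialiser (include/net/ipv6.h):

    static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
    {
            *ipc6 = (struct ipcm6_cookie) {
                    .hlimit = -1,
                    .tclass = -1,
                    .dontfrag = -1,
                    /* everything else, including sockc.mark, is 0 */
            };
    }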