Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 8 Mar 2016 17:34:12 +0000 (12:34 -0500)
committer David S. Miller <davem@davemloft.net>
Tue, 8 Mar 2016 17:34:12 +0000 (12:34 -0500)
Several cases of overlapping changes, as well as one instance
(vxlan) of a bug fix in 'net' overlapping with code movement
in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
56 files changed:
MAINTAINERS
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/phy/micrel.c
drivers/net/ppp/ppp_generic.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/of/of_mdio.c
include/linux/mlx5/mlx5_ifc.h
include/linux/perf_event.h
include/linux/skbuff.h
include/linux/stmmac.h
include/uapi/linux/bpf.h
net/core/filter.c
net/core/rtnetlink.c
net/core/skbuff.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/udp.c
net/mac80211/agg-rx.c
net/mac80211/ieee80211_i.h
net/mac80211/rx.c
net/sched/act_ipt.c
net/sctp/proc.c
net/tipc/socket.c
net/tipc/subscr.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/sme.c

diff --combined MAINTAINERS
index be0b56b38720617c0d86a4c6732ceae84b3ee327,4029c63d8a7d4dd68bebf7f923755a9904411ae3..2132c99f7fcde26498d7b59b13f45a4255c4694a
@@@ -151,7 -151,7 +151,7 @@@ S: Maintained
  F:    drivers/scsi/53c700*
  
  6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
 -M:    Alexander Aring <alex.aring@gmail.com>
 +M:    Alexander Aring <aar@pengutronix.de>
  M:    Jukka Rissanen <jukka.rissanen@linux.intel.com>
  L:    linux-bluetooth@vger.kernel.org
  L:    linux-wpan@vger.kernel.org
@@@ -920,17 -920,24 +920,24 @@@ M:      Emilio López <emilio@elopez.com.ar>
  S:    Maintained
  F:    drivers/clk/sunxi/
  
- ARM/Amlogic MesonX SoC support
+ ARM/Amlogic Meson SoC support
  M:    Carlo Caione <carlo@caione.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ L:    linux-meson@googlegroups.com
+ W:    http://linux-meson.com/
  S:    Maintained
- F:    drivers/media/rc/meson-ir.c
- N:    meson[x68]
+ F:    arch/arm/mach-meson/
+ F:    arch/arm/boot/dts/meson*
+ N:    meson
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
  M:    Tsahee Zidenberg <tsahee@annapurnalabs.com>
+ M:    Antoine Tenart <antoine.tenart@free-electrons.com>
  S:    Maintained
  F:    arch/arm/mach-alpine/
+ F:    arch/arm/boot/dts/alpine*
+ F:    arch/arm64/boot/dts/al/
+ F:    drivers/*/*alpine*
  
  ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
@@@ -2151,8 -2158,7 +2158,8 @@@ M:      Marek Lindner <mareklindner@neomailbox.ch>
  M:    Simon Wunderlich <sw@simonwunderlich.de>
  M:    Antonio Quartulli <a@unstable.cc>
  L:    b.a.t.m.a.n@lists.open-mesh.org
 -W:    http://www.open-mesh.org/
 +W:    https://www.open-mesh.org/
 +Q:    https://patchwork.open-mesh.org/project/batman/list/
  S:    Maintained
  F:    net/batman-adv/
  
@@@ -2422,7 -2428,6 +2429,7 @@@ F:      include/linux/bcm963xx_nvram.h
  F:    include/linux/bcm963xx_tag.h
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
 +M:    Siva Reddy Kallam <siva.kallam@broadcom.com>
  M:    Prashant Sreedharan <prashant@broadcom.com>
  M:    Michael Chan <mchan@broadcom.com>
  L:    netdev@vger.kernel.org
@@@ -3446,7 -3451,6 +3453,6 @@@ F:      drivers/usb/dwc2/
  DESIGNWARE USB3 DRD IP DRIVER
  M:    Felipe Balbi <balbi@kernel.org>
  L:    linux-usb@vger.kernel.org
- L:    linux-omap@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
  F:    drivers/usb/dwc3/
@@@ -3500,14 -3504,6 +3506,14 @@@ F:    include/linux/device-mapper.h
  F:    include/linux/dm-*.h
  F:    include/uapi/linux/dm-*.h
  
 +DEVLINK
 +M:    Jiri Pirko <jiri@mellanox.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    net/core/devlink.c
 +F:    include/net/devlink.h
 +F:    include/uapi/linux/devlink.h
 +
  DIALOG SEMICONDUCTOR DRIVERS
  M:    Support Opensource <support.opensource@diasemi.com>
  W:    http://www.dialog-semiconductor.com/products
@@@ -4522,6 -4518,12 +4528,12 @@@ L:    linuxppc-dev@lists.ozlabs.org
  S:    Maintained
  F:    drivers/dma/fsldma.*
  
+ FREESCALE GPMI NAND DRIVER
+ M:    Han Xu <han.xu@nxp.com>
+ L:    linux-mtd@lists.infradead.org
+ S:    Maintained
+ F:    drivers/mtd/nand/gpmi-nand/*
+
  FREESCALE I2C CPM DRIVER
  M:    Jochen Friedrich <jochen@scram.de>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -4538,7 -4540,7 +4550,7 @@@ F:      include/linux/platform_data/video-imxfb.h
  F:    drivers/video/fbdev/imxfb.c
  
  FREESCALE QUAD SPI DRIVER
- M:    Han Xu <han.xu@freescale.com>
+ M:    Han Xu <han.xu@nxp.com>
  L:    linux-mtd@lists.infradead.org
  S:    Maintained
  F:    drivers/mtd/spi-nor/fsl-quadspi.c
@@@ -4552,6 -4554,15 +4564,15 @@@ S:    Maintained
  F:    drivers/net/ethernet/freescale/fs_enet/
  F:    include/linux/fs_enet_pd.h
  
+ FREESCALE IMX / MXC FEC DRIVER
+ M:    Fugang Duan <fugang.duan@nxp.com>
+ L:    netdev@vger.kernel.org
+ S:    Maintained
+ F:    drivers/net/ethernet/freescale/fec_main.c
+ F:    drivers/net/ethernet/freescale/fec_ptp.c
+ F:    drivers/net/ethernet/freescale/fec.h
+ F:    Documentation/devicetree/bindings/net/fsl-fec.txt
+
  FREESCALE QUICC ENGINE LIBRARY
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Orphan
@@@ -5424,11 -5435,10 +5445,11 @@@ S:   Supported
  F:    drivers/idle/i7300_idle.c
  
  IEEE 802.15.4 SUBSYSTEM
 -M:    Alexander Aring <alex.aring@gmail.com>
 +M:    Alexander Aring <aar@pengutronix.de>
  L:    linux-wpan@vger.kernel.org
 -W:    https://github.com/linux-wpan
 -T:    git git://github.com/linux-wpan/linux-wpan-next.git
 +W:    http://wpan.cakelab.org/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
  S:    Maintained
  F:    net/ieee802154/
  F:    net/mac802154/
@@@ -6769,6 -6779,7 +6790,7 @@@ S:      Maintained
  F:    Documentation/networking/mac80211-injection.txt
  F:    include/net/mac80211.h
  F:    net/mac80211/
+ F:    drivers/net/wireless/mac80211_hwsim.[ch]
  
  MACVLAN DRIVER
  M:    Patrick McHardy <kaber@trash.net>
@@@ -7365,7 -7376,7 +7387,7 @@@ F:      drivers/tty/isicom.c
  F:    include/linux/isicom.h
  
  MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
- M:    Felipe Balbi <balbi@kernel.org>
+ M:    Bin Liu <b-liu@ti.com>
  L:    linux-usb@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
@@@ -7501,6 -7512,7 +7523,6 @@@ F:      net/netrom/
  
  NETRONOME ETHERNET DRIVERS
  M:    Jakub Kicinski <jakub.kicinski@netronome.com>
 -M:    Rolf Neugebauer <rolf.neugebauer@netronome.com>
  L:    oss-drivers@netronome.com
  S:    Maintained
  F:    drivers/net/ethernet/netronome/
@@@ -7696,13 -7708,13 +7718,13 @@@ S:   Maintained
  F:    arch/nios2/
  
  NOKIA N900 POWER SUPPLY DRIVERS
- M:    Pali Rohár <pali.rohar@gmail.com>
- S:    Maintained
+ R:    Pali Rohár <pali.rohar@gmail.com>
  F:    include/linux/power/bq2415x_charger.h
  F:    include/linux/power/bq27xxx_battery.h
  F:    include/linux/power/isp1704_charger.h
  F:    drivers/power/bq2415x_charger.c
  F:    drivers/power/bq27xxx_battery.c
+ F:    drivers/power/bq27xxx_battery_i2c.c
  F:    drivers/power/isp1704_charger.c
  F:    drivers/power/rx51_battery.c
  
@@@ -7933,11 -7945,9 +7955,9 @@@ F:     drivers/media/platform/omap3isp/
  F:    drivers/staging/media/omap4iss/
  
  OMAP USB SUPPORT
- M:    Felipe Balbi <balbi@kernel.org>
  L:    linux-usb@vger.kernel.org
  L:    linux-omap@vger.kernel.org
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
- S:    Maintained
+ S:    Orphan
  F:    drivers/usb/*/*omap*
  F:    arch/arm/*omap*/usb*
  
@@@ -9076,14 -9086,10 +9096,14 @@@ S:   Maintained
  F:    drivers/net/ethernet/rdc/r6040.c
  
  RDS - RELIABLE DATAGRAM SOCKETS
 -M:    Chien Yen <chien.yen@oracle.com>
 +M:    Santosh Shilimkar <santosh.shilimkar@oracle.com>
 +L:    netdev@vger.kernel.org
 +L:    linux-rdma@vger.kernel.org
  L:    rds-devel@oss.oracle.com (moderated for non-subscribers)
 +W:    https://oss.oracle.com/projects/rds/
  S:    Supported
  F:    net/rds/
 +F:    Documentation/networking/rds.txt
  
  READ-COPY UPDATE (RCU)
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
@@@ -9572,6 -9578,12 +9592,12 @@@ M:    Andreas Noever <andreas.noever@gmail.com>
  S:    Maintained
  F:    drivers/thunderbolt/
  
+ TI BQ27XXX POWER SUPPLY DRIVER
+ R:    Andrew F. Davis <afd@ti.com>
+ F:    include/linux/power/bq27xxx_battery.h
+ F:    drivers/power/bq27xxx_battery.c
+ F:    drivers/power/bq27xxx_battery_i2c.c
+
  TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
  M:    John Stultz <john.stultz@linaro.org>
  M:    Thomas Gleixner <tglx@linutronix.de>
@@@ -11345,13 -11357,6 +11371,13 @@@ S: Maintained
  F:    drivers/usb/host/isp116x*
  F:    include/linux/usb/isp116x.h
  
 +USB LAN78XX ETHERNET DRIVER
 +M:    Woojung Huh <woojung.huh@microchip.com>
 +M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/usb/lan78xx.*
 +
  USB MASS STORAGE DRIVER
  M:    Matthew Dharm <mdharm-usb@one-eyed-alien.net>
  L:    linux-usb@vger.kernel.org
diff --combined drivers/net/ethernet/3com/3c59x.c
index 7b881edc0b0af565212b41d79e0c1768ae61e7e8,17b2126075e01afde20ee2de1718dfc7db1e4641..d81fceddbe0e86d2f6489c3210051fe7265a6560
@@@ -1601,9 -1601,15 +1601,9 @@@ vortex_up(struct net_device *dev)
                                dev->name, media_tbl[dev->if_port].name);
        }
  
 -      init_timer(&vp->timer);
 -      vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
 -      vp->timer.data = (unsigned long)dev;
 -      vp->timer.function = vortex_timer;              /* timer handler */
 -      add_timer(&vp->timer);
 -
 -      init_timer(&vp->rx_oom_timer);
 -      vp->rx_oom_timer.data = (unsigned long)dev;
 -      vp->rx_oom_timer.function = rx_oom_timer;
 +      setup_timer(&vp->timer, vortex_timer, (unsigned long)dev);
 +      mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
 +      setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev);
  
        if (vortex_debug > 1)
                pr_debug("%s: Initial media type %s.\n",
@@@ -2455,7 -2461,7 +2455,7 @@@ boomerang_interrupt(int irq, void *dev_id)
                                        int i;
                                        pci_unmap_single(VORTEX_PCI(vp),
                                                        le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
-                                                       le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+                                                       le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
                                                        PCI_DMA_TODEVICE);
  
                                        for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
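The new & 0xFFF in the hunk above matters because the descriptor's length word carries flag bits in its upper bits, with only the low 12 bits holding the DMA length; the unmap path must mask the flags off. Values below are hypothetical, for illustration only:

    u32 word = 0x80000040;   /* e.g. a LAST_FRAG-style flag | 64-byte length */
    u32 len  = word & 0xFFF; /* 0x040 - the byte count that was actually mapped */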
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 04523d41b87385c941cebe020368c9341f17a0e3,91874d24fd560c2d25afbf7ab7e3251774627a5b..f8b8103130947a5c47d760ce70eb4e1e7af083b6
@@@ -1824,22 -1824,17 +1824,22 @@@ struct dcbx_app_priority_entry {
        u8  pri_bitmap;
        u8  appBitfield;
        #define DCBX_APP_ENTRY_VALID         0x01
 -      #define DCBX_APP_ENTRY_SF_MASK       0x30
 +      #define DCBX_APP_ENTRY_SF_MASK       0xF0
        #define DCBX_APP_ENTRY_SF_SHIFT      4
        #define DCBX_APP_SF_ETH_TYPE         0x10
        #define DCBX_APP_SF_PORT             0x20
 +      #define DCBX_APP_SF_UDP              0x40
 +      #define DCBX_APP_SF_DEFAULT          0x80
  #elif defined(__LITTLE_ENDIAN)
        u8 appBitfield;
        #define DCBX_APP_ENTRY_VALID         0x01
 -      #define DCBX_APP_ENTRY_SF_MASK       0x30
 +      #define DCBX_APP_ENTRY_SF_MASK       0xF0
        #define DCBX_APP_ENTRY_SF_SHIFT      4
 +      #define DCBX_APP_ENTRY_VALID         0x01
        #define DCBX_APP_SF_ETH_TYPE         0x10
        #define DCBX_APP_SF_PORT             0x20
 +      #define DCBX_APP_SF_UDP              0x40
 +      #define DCBX_APP_SF_DEFAULT          0x80
        u8  pri_bitmap;
        u16  app_id;
  #endif
@@@ -4901,9 -4896,9 +4901,9 @@@ struct c2s_pri_trans_table_entry {
   * cfc delete event data
   */
  struct cfc_del_event_data {
-       u32 cid;
-       u32 reserved0;
-       u32 reserved1;
+       __le32 cid;
+       __le32 reserved0;
+       __le32 reserved1;
  };
  
  
@@@ -5119,15 -5114,9 +5119,9 @@@ struct vf_pf_channel_zone_trigger {
   * zone that triggers the in-bound interrupt
   */
  struct trigger_vf_zone {
- #if defined(__BIG_ENDIAN)
-       u16 reserved1;
-       u8 reserved0;
-       struct vf_pf_channel_zone_trigger vf_pf_channel;
- #elif defined(__LITTLE_ENDIAN)
        struct vf_pf_channel_zone_trigger vf_pf_channel;
        u8 reserved0;
        u16 reserved1;
- #endif
        u32 reserved2;
  };
  
@@@ -5212,9 -5201,9 +5206,9 @@@ struct e2_integ_data {
   * set mac event data
   */
  struct eth_event_data {
-       u32 echo;
-       u32 reserved0;
-       u32 reserved1;
+       __le32 echo;
+       __le32 reserved0;
+       __le32 reserved1;
  };
  
  
  struct vf_pf_event_data {
        u8 vf_id;
        u8 reserved0;
-       u16 reserved1;
-       u32 msg_addr_lo;
-       u32 msg_addr_hi;
+       __le16 reserved1;
+       __le32 msg_addr_lo;
+       __le32 msg_addr_hi;
  };
  
  /*
  struct vf_flr_event_data {
        u8 vf_id;
        u8 reserved0;
-       u16 reserved1;
-       u32 reserved2;
-       u32 reserved3;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
  };
  
  /*
  struct malicious_vf_event_data {
        u8 vf_id;
        u8 err_id;
-       u16 reserved1;
-       u32 reserved2;
-       u32 reserved3;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
  };
  
  /*
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5c95d0c3b0761c147f0b36b82e9fd56da8eeeeca,2bf9c871144f714c56a72e4e584aa4db431d218c..b597c32275aa05797d3e9604a214577d623671d5
@@@ -59,9 -59,7 +59,9 @@@
  #include <linux/semaphore.h>
  #include <linux/stringify.h>
  #include <linux/vmalloc.h>
 -
 +#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +#include <net/geneve.h>
 +#endif
  #include "bnx2x.h"
  #include "bnx2x_init.h"
  #include "bnx2x_init_ops.h"
@@@ -5282,14 -5280,14 +5282,14 @@@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
  {
        unsigned long ramrod_flags = 0;
        int rc = 0;
-       u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+       u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+       u32 cid = echo & BNX2X_SWCID_MASK;
        struct bnx2x_vlan_mac_obj *vlan_mac_obj;
  
        /* Always push next commands out, don't wait here */
        __set_bit(RAMROD_CONT, &ramrod_flags);
  
-       switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
-                           >> BNX2X_SWCID_SHIFT) {
+       switch (echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
                if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
                bnx2x_handle_mcast_eqe(bp);
                return;
        default:
-               BNX2X_ERR("Unsupported classification command: %d\n",
-                         elem->message.data.eth_event.echo);
+               BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
                return;
        }
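The conversion above reads the firmware's little-endian echo field once and derives everything from the CPU-order copy, instead of force-casting at each use site. A sketch of the resulting pattern (the macros are from the hunk; the layout comments are illustrative):

    u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); /* convert once */
    u32 cid  = echo & BNX2X_SWCID_MASK;     /* low bits: connection id */
    u32 cmd  = echo >> BNX2X_SWCID_SHIFT;   /* high bits: pending command */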
  
@@@ -5480,9 -5477,6 +5479,6 @@@ static void bnx2x_eq_int(struct bnx2x *bp)
                        goto next_spqe;
                }
  
-               /* elem CID originates from FW; actually LE */
-               cid = SW_CID((__force __le32)
-                            elem->message.data.cfc_del_event.cid);
                opcode = elem->message.opcode;
  
                /* handle eq element */
                         * we may want to verify here that the bp state is
                         * HALTING
                         */
+                       /* elem CID originates from FW; actually LE */
+                       cid = SW_CID(elem->message.data.cfc_del_event.cid);
                        DP(BNX2X_MSG_SP,
                           "got delete ramrod for MULTI[%d]\n", cid);
  
                      BNX2X_STATE_OPENING_WAIT4_PORT):
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
-                       cid = elem->message.data.eth_event.echo &
-                               BNX2X_SWCID_MASK;
                        DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
-                          cid);
+                          SW_CID(elem->message.data.eth_event.echo));
                        rss_raw->clear_pending(rss_raw);
                        break;
  
@@@ -5686,7 -5682,7 +5684,7 @@@ static void bnx2x_sp_task(struct work_struct *work)
                if (status & BNX2X_DEF_SB_IDX) {
                        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  
-               if (FCOE_INIT(bp) &&
+                       if (FCOE_INIT(bp) &&
                            (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                                /* Prevent local bottom-halves from running as
                                 * we are going to change the local NAPI list.
@@@ -10078,13 -10074,11 +10076,13 @@@ static void bnx2x_parity_recover(struct bnx2x *bp)
        }
  }
  
 -#ifdef CONFIG_BNX2X_VXLAN
 -static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
 +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +static int bnx2x_udp_port_update(struct bnx2x *bp)
  {
        struct bnx2x_func_switch_update_params *switch_update_params;
        struct bnx2x_func_state_params func_params = {NULL};
 +      struct bnx2x_udp_tunnel *udp_tunnel;
 +      u16 vxlan_port = 0, geneve_port = 0;
        int rc;
  
        switch_update_params = &func_params.params.switch_update;
        /* Function parameters */
        __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
                  &switch_update_params->changes);
 -      switch_update_params->vxlan_dst_port = port;
 +
 +      if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
 +              udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
 +              geneve_port = udp_tunnel->dst_port;
 +              switch_update_params->geneve_dst_port = geneve_port;
 +      }
 +
 +      if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
 +              udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
 +              vxlan_port = udp_tunnel->dst_port;
 +              switch_update_params->vxlan_dst_port = vxlan_port;
 +      }
 +
 +      /* Re-enable inner-rss for the offloaded UDP tunnels */
 +      __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
 +                &switch_update_params->changes);
 +
        rc = bnx2x_func_state_change(bp, &func_params);
        if (rc)
 -              BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
 -                        port, rc);
 +              BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
 +                        vxlan_port, geneve_port, rc);
 +      else
 +              DP(BNX2X_MSG_SP,
 +                 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
 +                 vxlan_port, geneve_port);
 +
        return rc;
  }
  
 -static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
 +static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
 +                               enum bnx2x_udp_port_type type)
  {
 -      if (!netif_running(bp->dev))
 +      struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
 +
 +      if (!netif_running(bp->dev) || !IS_PF(bp))
 +              return;
 +
 +      if (udp_port->count && udp_port->dst_port == port) {
 +              udp_port->count++;
                return;
 +      }
  
 -      if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
 -              bp->vxlan_dst_port_count++;
 +      if (udp_port->count) {
 +              DP(BNX2X_MSG_SP,
 +                 "UDP tunnel [%d] -  destination port limit reached\n",
 +                 type);
                return;
        }
  
 -      if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
 -              DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
 +      udp_port->dst_port = port;
 +      udp_port->count = 1;
 +      bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
 +}
 +
 +static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
 +                               enum bnx2x_udp_port_type type)
 +{
 +      struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
 +
 +      if (!IS_PF(bp))
 +              return;
 +
 +      if (!udp_port->count || udp_port->dst_port != port) {
 +              DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
 +                 type);
                return;
        }
  
 -      bp->vxlan_dst_port = port;
 -      bp->vxlan_dst_port_count = 1;
 -      bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
 +      /* Remove reference, and make certain it's no longer in use */
 +      udp_port->count--;
 +      if (udp_port->count)
 +              return;
 +      udp_port->dst_port = 0;
 +
 +      if (netif_running(bp->dev))
 +              bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
 +      else
 +              DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
 +                 type, port);
  }
 +#endif
  
 +#ifdef CONFIG_BNX2X_VXLAN
  static void bnx2x_add_vxlan_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
  {
        struct bnx2x *bp = netdev_priv(netdev);
        u16 t_port = ntohs(port);
  
 -      __bnx2x_add_vxlan_port(bp, t_port);
 +      __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
  }
  
 -static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
 +static void bnx2x_del_vxlan_port(struct net_device *netdev,
 +                               sa_family_t sa_family, __be16 port)
  {
 -      if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
 -          !IS_PF(bp)) {
 -              DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
 -              return;
 -      }
 -      bp->vxlan_dst_port_count--;
 -      if (bp->vxlan_dst_port_count)
 -              return;
 +      struct bnx2x *bp = netdev_priv(netdev);
 +      u16 t_port = ntohs(port);
  
 -      if (netif_running(bp->dev)) {
 -              bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
 -      } else {
 -              bp->vxlan_dst_port = 0;
 -              netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
 -      }
 +      __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
 +}
 +#endif
 +
 +#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +static void bnx2x_add_geneve_port(struct net_device *netdev,
 +                                sa_family_t sa_family, __be16 port)
 +{
 +      struct bnx2x *bp = netdev_priv(netdev);
 +      u16 t_port = ntohs(port);
 +
 +      __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
  }
  
 -static void bnx2x_del_vxlan_port(struct net_device *netdev,
 -                               sa_family_t sa_family, __be16 port)
 +static void bnx2x_del_geneve_port(struct net_device *netdev,
 +                                sa_family_t sa_family, __be16 port)
  {
        struct bnx2x *bp = netdev_priv(netdev);
        u16 t_port = ntohs(port);
  
 -      __bnx2x_del_vxlan_port(bp, t_port);
 +      __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
  }
  #endif
  
@@@ -10229,6 -10167,9 +10227,6 @@@ static int bnx2x_close(struct net_device *dev)
  static void bnx2x_sp_rtnl_task(struct work_struct *work)
  {
        struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
 -#ifdef CONFIG_BNX2X_VXLAN
 -      u16 port;
 -#endif
  
        rtnl_lock();
  
@@@ -10327,27 -10268,23 +10325,27 @@@ sp_rtnl_not_reset
                               &bp->sp_rtnl_state))
                bnx2x_update_mng_version(bp);
  
 -#ifdef CONFIG_BNX2X_VXLAN
 -      port = bp->vxlan_dst_port;
 -      if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
 -                             &bp->sp_rtnl_state)) {
 -              if (!bnx2x_vxlan_port_update(bp, port))
 -                      netdev_info(bp->dev, "Added vxlan dest port %d", port);
 -              else
 -                      bp->vxlan_dst_port = 0;
 -      }
 -
 -      if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
 +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
                               &bp->sp_rtnl_state)) {
 -              if (!bnx2x_vxlan_port_update(bp, 0)) {
 -                      netdev_info(bp->dev,
 -                                  "Deleted vxlan dest port %d", port);
 -                      bp->vxlan_dst_port = 0;
 -                      vxlan_get_rx_port(bp->dev);
 +              if (bnx2x_udp_port_update(bp)) {
 +                      /* On error, forget configuration */
 +                      memset(bp->udp_tunnel_ports, 0,
 +                             sizeof(struct bnx2x_udp_tunnel) *
 +                             BNX2X_UDP_PORT_MAX);
 +              } else {
 +                      /* Since we don't store additional port information,
 +                       * if no port is configured for any feature ask for
 +                       * information about currently configured ports.
 +                       */
 +#ifdef CONFIG_BNX2X_VXLAN
 +                      if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
 +                              vxlan_get_rx_port(bp->dev);
 +#endif
 +#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +                      if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
 +                              geneve_get_rx_port(bp->dev);
 +#endif
                }
        }
  #endif
@@@ -12429,10 -12366,8 +12427,10 @@@ static int bnx2x_init_bp(struct bnx2x *bp)
  
        if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
            SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
 +          SHMEM2_HAS(bp, dcbx_en) &&
            SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
 -          SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
 +          SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
 +          SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
                bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
                bnx2x_dcbx_init_params(bp);
        } else {
@@@ -12557,10 -12492,6 +12555,10 @@@ static int bnx2x_open(struct net_device *dev)
        if (IS_PF(bp))
                vxlan_get_rx_port(dev);
  #endif
 +#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +      if (IS_PF(bp))
 +              geneve_get_rx_port(dev);
 +#endif
  
        return 0;
  }
@@@ -13061,7 -12992,7 +13059,7 @@@ static const struct net_device_ops bnx2x_netdev_ops = {
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2x,
  #endif
 -      .ndo_setup_tc           = bnx2x_setup_tc,
 +      .ndo_setup_tc           = __bnx2x_setup_tc,
  #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
        .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
        .ndo_add_vxlan_port     = bnx2x_add_vxlan_port,
        .ndo_del_vxlan_port     = bnx2x_del_vxlan_port,
  #endif
 +#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
 +      .ndo_add_geneve_port    = bnx2x_add_geneve_port,
 +      .ndo_del_geneve_port    = bnx2x_del_geneve_port,
 +#endif
  };
  
  static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ce6b075842ee592ee4757ebde82887eef963fb36,82f191382989b04877177075ea1906f0e941dfb9..4dfc25f042e805b4c463117d1aec49bb096dff0a
@@@ -248,7 -248,8 +248,8 @@@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
  
-               end = PTR_ALIGN(pdata + length + 1, 8) - 1;
+               end = pdata + length;
+               end = PTR_ALIGN(end, 8) - 1;
                *end = 0;
  
                skb_copy_from_linear_data(skb, pdata, len);
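The split above changes which address gets aligned before the trailing zero-pad. A worked example with illustrative addresses shows why the old form could write past the data whenever pdata + length was already 8-byte aligned:

    /* pdata = 0x1000, length = 8, so pdata + length = 0x1008 (aligned):
     *   old: end = PTR_ALIGN(0x1008 + 1, 8) - 1 = 0x1010 - 1 = 0x100f  (7 bytes past)
     *   new: end = PTR_ALIGN(0x1008,     8) - 1 = 0x1008 - 1 = 0x1007  (last data byte)
     * For a non-multiple-of-8 length the two forms agree, e.g. length = 5:
     *   old 0x1006 and new 0x1005 both align up to 0x1008, giving end = 0x1007.
     */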
@@@ -1239,17 -1240,13 +1240,17 @@@ static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
        switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
 -              schedule_work(&bp->sp_task);
 +              break;
 +      case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
 +              set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        default:
                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
                           event_id);
 -              break;
 +              goto async_event_process_exit;
        }
 +      schedule_work(&bp->sp_task);
 +async_event_process_exit:
        return 0;
  }
  
@@@ -2600,27 -2597,28 +2601,27 @@@ alloc_mem_err
  void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
                            u16 cmpl_ring, u16 target_id)
  {
 -      struct hwrm_cmd_req_hdr *req = request;
 +      struct input *req = request;
  
 -      req->cmpl_ring_req_type =
 -              cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
 -      req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
 +      req->req_type = cpu_to_le16(req_type);
 +      req->cmpl_ring = cpu_to_le16(cmpl_ring);
 +      req->target_id = cpu_to_le16(target_id);
        req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
  }
  
 -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 +static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 +                               int timeout, bool silent)
  {
        int i, intr_process, rc;
 -      struct hwrm_cmd_req_hdr *req = msg;
 +      struct input *req = msg;
        u32 *data = msg;
        __le32 *resp_len, *valid;
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
  
 -      req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
 +      req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
        memset(resp, 0, PAGE_SIZE);
 -      cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
 -                    HWRM_CMPL_RING_MASK) >>
 -                   HWRM_CMPL_RING_SFT;
 +      cp_ring_id = le16_to_cpu(req->cmpl_ring);
        intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
  
        /* Write request msg to hwrm channel */
  
        /* currently supports only one outstanding message */
        if (intr_process)
 -              bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
 -                                     HWRM_SEQ_ID_MASK;
 +              bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
  
        /* Ring channel doorbell */
        writel(1, bp->bar0 + 0x100);
  
 +      if (!timeout)
 +              timeout = DFLT_HWRM_CMD_TIMEOUT;
 +
        i = 0;
        if (intr_process) {
                /* Wait until hwrm response cmpl interrupt is processed */
  
                if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
                        netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
 -                                 req->cmpl_ring_req_type);
 +                                 le16_to_cpu(req->req_type));
                        return -1;
                }
        } else {
  
                if (i >= timeout) {
                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
 -                                 timeout, req->cmpl_ring_req_type,
 -                                 req->target_id_seq_id, *resp_len);
 +                                 timeout, le16_to_cpu(req->req_type),
 +                                 le16_to_cpu(req->seq_id), *resp_len);
                        return -1;
                }
  
  
                if (i >= timeout) {
                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
 -                                 timeout, req->cmpl_ring_req_type,
 -                                 req->target_id_seq_id, len, *valid);
 +                                 timeout, le16_to_cpu(req->req_type),
 +                                 le16_to_cpu(req->seq_id), len, *valid);
                        return -1;
                }
        }
  
        rc = le16_to_cpu(resp->error_code);
 -      if (rc) {
 +      if (rc && !silent)
                netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
                           le16_to_cpu(resp->req_type),
                           le16_to_cpu(resp->seq_id), rc);
 -              return rc;
 -      }
 -      return 0;
 +      return rc;
 +}
 +
 +int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 +{
 +      return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
  }
  
  int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
        return rc;
  }
  
 +int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
 +                           int timeout)
 +{
 +      int rc;
 +
 +      mutex_lock(&bp->hwrm_cmd_lock);
 +      rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
 +      mutex_unlock(&bp->hwrm_cmd_lock);
 +      return rc;
 +}
 +
  static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
  {
        struct hwrm_func_drv_rgtr_input req = {0};
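With the request header now described by the generic struct input, every HWRM call follows the same two-step shape. A hedged usage sketch; the request type here is a plausible example from the HWRM interface, not taken from this hunk:

    struct hwrm_func_reset_input req = {0};
    int rc;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
            netdev_err(bp->dev, "HWRM_FUNC_RESET failed: %d\n", rc);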
@@@ -3536,82 -3518,47 +3537,82 @@@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
        }
  }
  
 +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
 +      u32 buf_tmrs, u16 flags,
 +      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
 +{
 +      req->flags = cpu_to_le16(flags);
 +      req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
 +      req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
 +      req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
 +      req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
 +      /* Minimum time between 2 interrupts set to buf_tmr x 2 */
 +      req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
 +      req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
 +      req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
 +}
 +
  int bnxt_hwrm_set_coal(struct bnxt *bp)
  {
        int i, rc = 0;
 -      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
 +      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
 +                                                         req_tx = {0}, *req;
        u16 max_buf, max_buf_irq;
        u16 buf_tmr, buf_tmr_irq;
        u32 flags;
  
 -      bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
 -                             -1, -1);
 +      bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
 +                             HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
 +      bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
 +                             HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  
 -      /* Each rx completion (2 records) should be DMAed immediately */
 -      max_buf = min_t(u16, bp->coal_bufs / 4, 2);
 +      /* Each rx completion (2 records) should be DMAed immediately.
 +       * DMA 1/4 of the completion buffers at a time.
 +       */
 +      max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
        /* max_buf must not be zero */
        max_buf = clamp_t(u16, max_buf, 1, 63);
 -      max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
 -      buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
 -      buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
 +      max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
 +      buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
 +      /* buf timer set to 1/4 of interrupt timer */
 +      buf_tmr = max_t(u16, buf_tmr / 4, 1);
 +      buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
 +      buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
  
        flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  
        /* RING_IDLE generates more IRQs for lower latency.  Enable it only
         * if coal_ticks is less than 25 us.
         */
 -      if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
 +      if (bp->rx_coal_ticks < 25)
                flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
  
 -      req.flags = cpu_to_le16(flags);
 -      req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
 -      req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
 -      req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
 -      req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
 -      req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
 -      req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
 -      req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
 +      bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
 +                                buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
 +
 +      /* max_buf must not be zero */
 +      max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
 +      max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
 +      buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
 +      /* buf timer set to 1/4 of interrupt timer */
 +      buf_tmr = max_t(u16, buf_tmr / 4, 1);
 +      buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
 +      buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
 +
 +      flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
 +      bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
 +                                buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
  
        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < bp->cp_nr_rings; i++) {
 -              req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
 +              struct bnxt_napi *bnapi = bp->bnapi[i];
  
 -              rc = _hwrm_send_message(bp, &req, sizeof(req),
 +              req = &req_rx;
 +              if (!bnapi->rx_ring)
 +                      req = &req_tx;
 +              req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
 +
 +              rc = _hwrm_send_message(bp, req, sizeof(*req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
@@@ -3820,14 -3767,10 +3821,14 @@@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
                            resp->hwrm_intf_upd);
                netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
        }
 -      snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
 +      snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
                 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
  
 +      bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
 +      if (!bp->hwrm_cmd_timeout)
 +              bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 +
  hwrm_ver_get_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
@@@ -5349,16 -5292,10 +5350,16 @@@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
        bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
  
 -      bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
 -      bp->coal_bufs = 20;
 -      bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
 -      bp->coal_bufs_irq = 2;
 +      /* tick values in micro seconds */
 +      bp->rx_coal_ticks = 12;
 +      bp->rx_coal_bufs = 30;
 +      bp->rx_coal_ticks_irq = 1;
 +      bp->rx_coal_bufs_irq = 2;
 +
 +      bp->tx_coal_ticks = 25;
 +      bp->tx_coal_bufs = 30;
 +      bp->tx_coal_ticks_irq = 2;
 +      bp->tx_coal_bufs_irq = 2;
  
        init_timer(&bp->timer);
        bp->timer.data = (unsigned long)bp;
@@@ -5441,16 -5378,9 +5442,16 @@@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
  }
  
 -static int bnxt_setup_tc(struct net_device *dev, u8 tc)
 +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 +                       struct tc_to_netdev *ntc)
  {
        struct bnxt *bp = netdev_priv(dev);
 +      u8 tc;
 +
 +      if (ntc->type != TC_SETUP_MQPRIO)
 +              return -EINVAL;
 +
 +      tc = ntc->tc;
  
        if (tc > bp->max_tc) {
                netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@@ -5623,8 -5553,6 +5624,8 @@@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
                        }
                }
        }
 +      if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
 +              netdev_info(bp->dev, "Receive PF driver unload event!");
  }
  
  #else
@@@ -5740,6 -5668,7 +5741,6 @@@ static int bnxt_probe_phy(struct bnxt *bp)
  {
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;
 -      char phy_ver[PHY_VER_STR_LEN];
  
        rc = bnxt_update_link(bp, false);
        if (rc) {
                link_info->req_duplex = link_info->duplex_setting;
                link_info->req_flow_ctrl = link_info->force_pause_setting;
        }
 -      snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
 -               link_info->phy_ver[0],
 -               link_info->phy_ver[1],
 -               link_info->phy_ver[2]);
 -      strcat(bp->fw_ver_str, phy_ver);
        return rc;
  }
  
diff --combined drivers/net/ethernet/cavium/thunder/nic.h
index 00cc9156abbbc06263a0ca146c7d94b1d75710f1,34e9acea87479db83b852c4e233c3e26b532a6a1..092f097a59432ee30e207d355031a722a682517a
  #define NIC_PF_INTR_ID_MBOX0          8
  #define NIC_PF_INTR_ID_MBOX1          9
  
+ /* Minimum FIFO level before all packets for the CQ are dropped
+  *
+  * This value ensures that once a packet has been "accepted"
+  * for reception it will not get dropped due to non-availability
+  * of CQ descriptor. An errata in HW mandates this value to be
+  * at least 0x100.
+  */
+ #define NICPF_CQM_MIN_DROP_LEVEL       0x100
  /* Global timer for CQ timer thresh interrupts
   * Calculated for SCLK of 700Mhz
   * value written should be a 1/16th of what is expected
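Illustrative arithmetic for the 1/16th rule above (an assumed reading, since the rest of the comment lies outside this hunk): a 2 us threshold at SCLK = 700 MHz spans 2 * 700 = 1400 clocks, so the value written would be 1400 / 16 = 87 (0x57).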
@@@ -248,13 -257,10 +257,13 @@@ struct nicvf_drv_stats {
        u64 rx_frames_jumbo;
        u64 rx_drops;
  
 +      u64 rcv_buffer_alloc_failures;
 +
        /* Tx */
        u64 tx_frames_ok;
        u64 tx_drops;
        u64 tx_tso;
 +      u64 tx_timeout;
        u64 txq_stop;
        u64 txq_wake;
  };
@@@ -309,7 -315,6 +318,7 @@@ struct nicvf {
        struct msix_entry       msix_entries[NIC_VF_MSIX_VECTORS];
        char                    irq_name[NIC_VF_MSIX_VECTORS][20];
        bool                    irq_allocated[NIC_VF_MSIX_VECTORS];
 +      cpumask_var_t           affinity_mask[NIC_VF_MSIX_VECTORS];
  
        /* VF <-> PF mailbox communication */
        bool                    pf_acked;
diff --combined drivers/net/ethernet/emulex/benet/be.h
index ee584c59ff628ce1acb4f92d8d67cc299fd11ce2,f9751294ece79e54eff07e20ad9122f585bda32a..fe3763df3f130ce98ddfc0868debc193144c4b3d
@@@ -72,9 -72,6 +72,9 @@@
  #define BE_MAX_MTU              (BE_MAX_JUMBO_FRAME_SIZE -    \
                                 (ETH_HLEN + ETH_FCS_LEN))
  
 +/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */
 +#define BE_MAX_GSO_SIZE               (65535 - 2 * VLAN_HLEN)
 +
  #define BE_NUM_VLANS_SUPPORTED        64
  #define BE_MAX_EQD            128u
  #define       BE_MAX_TX_FRAG_COUNT    30
  #define BE3_MAX_TX_QS         16
  #define BE3_MAX_EVT_QS                16
  #define BE3_SRIOV_MAX_EVT_QS  8
 +#define SH_VF_MAX_NIC_EQS     3       /* Skyhawk VFs can have a max of 4 EQs
 +                                       * and at least 1 is granted to either
 +                                       * SURF/DPDK
 +                                       */
  
  #define MAX_RSS_IFACES                15
  #define MAX_RX_QS             32
  #define       RSS_INDIR_TABLE_LEN     128
  #define RSS_HASH_KEY_LEN      40
  
 +#define BE_UNKNOWN_PHY_STATE  0xFF
 +
  struct be_dma_mem {
        void *va;
        dma_addr_t dma;
  };
  
  struct be_queue_info {
 +      u32 len;
 +      u32 entry_size; /* Size of an element in the queue */
 +      u32 tail, head;
 +      atomic_t used;  /* Number of valid elements in the queue */
 +      u32 id;
        struct be_dma_mem dma_mem;
 -      u16 len;
 -      u16 entry_size; /* Size of an element in the queue */
 -      u16 id;
 -      u16 tail, head;
        bool created;
 -      atomic_t used;  /* Number of valid elements in the queue */
  };
  
 -static inline u32 MODULO(u16 val, u16 limit)
 +static inline u32 MODULO(u32 val, u32 limit)
  {
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
  }
  
 -static inline void index_adv(u16 *index, u16 val, u16 limit)
 +static inline void index_adv(u32 *index, u32 val, u32 limit)
  {
        *index = MODULO((*index + val), limit);
  }
  
 -static inline void index_inc(u16 *index, u16 limit)
 +static inline void index_inc(u32 *index, u32 limit)
  {
        *index = MODULO((*index + 1), limit);
  }
@@@ -172,7 -163,7 +172,7 @@@ static inline void queue_head_inc(struct be_queue_info *q)
        index_inc(&q->head, q->len);
  }
  
 -static inline void index_dec(u16 *index, u16 limit)
 +static inline void index_dec(u32 *index, u32 limit)
  {
        *index = MODULO((*index - 1), limit);
  }
@@@ -395,17 -386,13 +395,17 @@@ enum vf_state {
  #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD           BIT(7)
  #define BE_FLAGS_VXLAN_OFFLOADS                       BIT(8)
  #define BE_FLAGS_SETUP_DONE                   BIT(9)
 -#define BE_FLAGS_EVT_INCOMPATIBLE_SFP         BIT(10)
 +#define BE_FLAGS_PHY_MISCONFIGURED            BIT(10)
  #define BE_FLAGS_ERR_DETECTION_SCHEDULED      BIT(11)
  #define BE_FLAGS_OS2BMC                               BIT(12)
  
  #define BE_UC_PMAC_COUNT                      30
  #define BE_VF_UC_PMAC_COUNT                   2
  
 +#define MAX_ERR_RECOVERY_RETRY_COUNT          3
 +#define ERR_DETECTION_DELAY                   1000
 +#define ERR_RECOVERY_RETRY_DELAY              30000
 +
  /* Ethtool set_dump flags */
  #define LANCER_INITIATE_FW_DUMP                       0x1
  #define LANCER_DELETE_FW_DUMP                 0x2
@@@ -543,8 -530,8 +543,9 @@@ struct be_adapter {
        u16 work_counter;
  
        struct delayed_work be_err_detection_work;
 +      u8 recovery_retries;
        u8 err_flags;
+       bool pcicfg_mapped;     /* pcicfg obtained via pci_iomap() */
        u32 flags;
        u32 cmd_privileges;
        /* Ethtool knobs and info */
        u32 bmc_filt_mask;
        u32 fat_dump_len;
        u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 +      u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
  };
  
  #define be_physfn(adapter)            (!adapter->virtfn)
diff --combined drivers/net/ethernet/emulex/benet/be_cmds.h
index 0b9f741f31af52b1a40404199a78b355b8db725c,6d9a8d78e8ad8413f075960ee45f0033e222861c..d8540ae95e5aeea1dbcee207d6f13374d34852e6
@@@ -68,8 -68,7 +68,8 @@@ enum mcc_addl_status {
        MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
        MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
        MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
 -      MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57
 +      MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57,
 +      MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60
  };
  
  #define CQE_BASE_STATUS_MASK          0xFFFF
@@@ -176,53 -175,10 +176,53 @@@ struct be_async_event_qnq {
        u32 flags;
  } __packed;
  
 -#define INCOMPATIBLE_SFP              0x3
 +enum {
 +      BE_PHY_FUNCTIONAL       = 0,
 +      BE_PHY_NOT_PRESENT      = 1,
 +      BE_PHY_DIFF_MEDIA       = 2,
 +      BE_PHY_INCOMPATIBLE     = 3,
 +      BE_PHY_UNQUALIFIED      = 4,
 +      BE_PHY_UNCERTIFIED      = 5
 +};
 +
 +#define PHY_STATE_MSG_SEVERITY                0x6
 +#define PHY_STATE_OPER                        0x1
 +#define PHY_STATE_INFO_VALID          0x80
 +#define       PHY_STATE_OPER_MSG_NONE         0x2
 +#define DEFAULT_MSG_SEVERITY          0x1
 +
 +#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED)
 +#define be_phy_unqualified(phy_state)                         \
 +                      (phy_state == BE_PHY_UNQUALIFIED ||     \
 +                       phy_state == BE_PHY_UNCERTIFIED)
 +#define be_phy_misconfigured(phy_state)                               \
 +                      (phy_state == BE_PHY_INCOMPATIBLE ||    \
 +                       phy_state == BE_PHY_UNQUALIFIED ||     \
 +                       phy_state == BE_PHY_UNCERTIFIED)
 +
 +extern  char *be_misconfig_evt_port_state[];
 +
  /* async event indicating misconfigured port */
  struct be_async_event_misconfig_port {
 + /* DATA_WORD1:
 +  * phy state of port 0: bits 7 - 0
 +  * phy state of port 1: bits 15 - 8
 +  * phy state of port 2: bits 23 - 16
 +  * phy state of port 3: bits 31 - 24
 +  */
        u32 event_data_word1;
 + /* DATA_WORD2:
 +  * phy state info of port 0: bits 7 - 0
 +  * phy state info of port 1: bits 15 - 8
 +  * phy state info of port 2: bits 23 - 16
 +  * phy state info of port 3: bits 31 - 24
 +  *
 +  * PHY STATE INFO:
 +  * Link operability   :bit 0
 +  * Message severity   :bit 2 - 1
 +  * Rsvd                       :bits 6 - 3
 +  * phy state info valid       :bit 7
 +  */
        u32 event_data_word2;
        u32 rsvd0;
        u32 flags;
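Given the per-port byte layout documented in the comments above, decoding reduces to shifting by 8 * port. A sketch; the helper names are hypothetical, while PHY_STATE_INFO_VALID (0x80) comes from this hunk:

    static inline u8 be_port_phy_state(u32 event_data_word1, int port)
    {
            return (event_data_word1 >> (port * 8)) & 0xFF;  /* bits 8p+7..8p */
    }

    static inline bool be_port_phy_info_valid(u32 event_data_word2, int port)
    {
            u8 info = (event_data_word2 >> (port * 8)) & 0xFF;

            return info & PHY_STATE_INFO_VALID;              /* bit 7 */
    }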
@@@ -666,10 -622,13 +666,13 @@@ enum be_if_flags {
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                                         BE_IF_FLAGS_MCAST_PROMISCUOUS)
  
- #define BE_IF_EN_FLAGS        (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
-                       BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+ #define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
+                               BE_IF_FLAGS_PASS_L3L4_ERRORS | \
+                               BE_IF_FLAGS_UNTAGGED)
  
- #define BE_IF_ALL_FILT_FLAGS  (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+ #define BE_IF_ALL_FILT_FLAGS  (BE_IF_FILT_FLAGS_BASIC | \
+                                BE_IF_FLAGS_MULTICAST | \
+                                BE_IF_FLAGS_ALL_PROMISCUOUS)
  
  /* An RX interface is an object with one or more MAC addresses and
   * filtering capabilities. */
diff --combined drivers/net/ethernet/emulex/benet/be_main.c
index 17422b20a8ec0322bd46b3a19bba48bcf01bdc17,d1cf1274fc2f4de4b763e683b182e485e39452e6..536686476369bfb242ceca0e28a436d150a1ed15
@@@ -125,6 -125,11 +125,11 @@@ static const char * const ue_status_hi_desc[] = {
        "Unknown"
  };
  
+ #define BE_VF_IF_EN_FLAGS     (BE_IF_FLAGS_UNTAGGED | \
+                                BE_IF_FLAGS_BROADCAST | \
+                                BE_IF_FLAGS_MULTICAST | \
+                                BE_IF_FLAGS_PASS_L3L4_ERRORS)
  static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
  {
        struct be_dma_mem *mem = &q->dma_mem;
@@@ -849,9 -854,9 +854,9 @@@ static void unmap_tx_frag(struct devic
  }
  
  /* Grab a WRB header for xmit */
 -static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
 +static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
  {
 -      u16 head = txo->q.head;
 +      u32 head = txo->q.head;
  
        queue_head_inc(&txo->q);
        return head;
@@@ -895,7 -900,7 +900,7 @@@ static void be_tx_setup_wrb_frag(struc
   * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
   */
  static void be_xmit_restore(struct be_adapter *adapter,
 -                          struct be_tx_obj *txo, u16 head, bool map_single,
 +                          struct be_tx_obj *txo, u32 head, bool map_single,
                            u32 copied)
  {
        struct device *dev;
@@@ -930,7 -935,7 +935,7 @@@ static u32 be_xmit_enqueue(struct be_ad
        struct device *dev = &adapter->pdev->dev;
        struct be_queue_info *txq = &txo->q;
        bool map_single = false;
 -      u16 head = txq->head;
 +      u32 head = txq->head;
        dma_addr_t busaddr;
        int len;
  
@@@ -1123,8 -1128,6 +1128,8 @@@ static struct sk_buff *be_xmit_workaround(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           struct be_wrb_params *wrb_params)
  {
 +      int err;
 +
        /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
         * packets that are 32b or less may cause a transmit stall
         * on that port. The workaround is to pad such packets
                        return NULL;
        }
  
 +      /* The stack can send us skbs with length greater than
 +       * what the HW can handle. Trim the extra bytes.
 +       */
 +      WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
 +      err = pskb_trim(skb, BE_MAX_GSO_SIZE);
 +      WARN_ON(err);
 +
        return skb;
  }
  
@@@ -1472,9 -1468,6 +1477,9 @@@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                return 0;
  
 +      if (!test_bit(vid, adapter->vids))
 +              return 0;
 +
        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;
  
@@@ -1926,7 -1919,8 +1931,7 @@@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
        if (!aic->enable)
                return 0;
  
 -      if (time_before_eq(now, aic->jiffies) ||
 -          jiffies_to_msecs(now - aic->jiffies) < 1)
 +      if (jiffies_to_msecs(now - aic->jiffies) < 1)
                eqd = aic->prev_eqd;
        else
                eqd = be_get_new_eqd(eqo);
@@@ -1999,7 -1993,7 +2004,7 @@@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
 -      u16 frag_idx = rxq->tail;
 +      u32 frag_idx = rxq->tail;
  
        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);
@@@ -2410,11 -2404,10 +2415,11 @@@ static u16 be_tx_compl_process(struct b
  {
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct be_queue_info *txq = &txo->q;
 -      u16 frag_index, num_wrbs = 0;
        struct sk_buff *skb = NULL;
        bool unmap_skb_hdr = false;
        struct be_eth_wrb *wrb;
 +      u16 num_wrbs = 0;
 +      u32 frag_index;
  
        do {
                if (sent_skbs[txq->tail]) {
@@@ -2526,11 -2519,10 +2531,11 @@@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
  
  static void be_tx_compl_clean(struct be_adapter *adapter)
  {
 -      u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct device *dev = &adapter->pdev->dev;
 +      u16 cmpl = 0, timeo = 0, num_wrbs = 0;
        struct be_tx_compl_info *txcp;
        struct be_queue_info *txq;
 +      u32 end_idx, notified_idx;
        struct be_tx_obj *txo;
        int i, pending_txqs;
  
@@@ -3376,7 -3368,6 +3381,7 @@@ done
  
  static void be_rx_qs_destroy(struct be_adapter *adapter)
  {
 +      struct rss_info *rss = &adapter->rss_info;
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;
                }
                be_queue_free(adapter, q);
        }
 +
 +      if (rss->rss_flags) {
 +              rss->rss_flags = RSS_ENABLE_NONE;
 +              be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
 +                                128, rss->rss_hkey);
 +      }
  }
  
  static void be_disable_if_filters(struct be_adapter *adapter)
@@@ -3529,21 -3514,20 +3534,21 @@@ static int be_rx_qs_create(struct be_adapter *adapter)
                if (!BEx_chip(adapter))
                        rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
                                RSS_ENABLE_UDP_IPV6;
 +
 +              netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
 +              rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
 +                                     RSS_INDIR_TABLE_LEN, rss_key);
 +              if (rc) {
 +                      rss->rss_flags = RSS_ENABLE_NONE;
 +                      return rc;
 +              }
 +
 +              memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
        } else {
                /* Disable RSS, if only default RX Q is created */
                rss->rss_flags = RSS_ENABLE_NONE;
        }
  
 -      netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
 -      rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
 -                             RSS_INDIR_TABLE_LEN, rss_key);
 -      if (rc) {
 -              rss->rss_flags = RSS_ENABLE_NONE;
 -              return rc;
 -      }
 -
 -      memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
  
        /* Post 1 less than RXQ-len to avoid head being equal to tail,
         * which is a queue empty condition
@@@ -3558,7 -3542,7 +3563,7 @@@ static int be_enable_if_filters(struct 
  {
        int status;
  
-       status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+       status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
        if (status)
                return status;
  
@@@ -3810,15 -3794,18 +3815,15 @@@ static u16 be_calculate_vf_qs(struct be
        struct be_resources res = adapter->pool_res;
        u16 num_vf_qs = 1;
  
 -      /* Distribute the queue resources equally among the PF and it's VFs
 +      /* Distribute the queue resources among the PF and its VFs
         * Do not distribute queue resources in multi-channel configuration.
         */
        if (num_vfs && !be_is_mc(adapter)) {
 -              /* If number of VFs requested is 8 less than max supported,
 -               * assign 8 queue pairs to the PF and divide the remaining
 -               * resources evenly among the VFs
 -               */
 -              if (num_vfs < (be_max_vfs(adapter) - 8))
 -                      num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
 -              else
 -                      num_vf_qs = res.max_rss_qs / num_vfs;
 +              /* Divide the qpairs evenly among the VFs and the PF, capped
 +               * at VF-EQ-count. Any remainder qpairs belong to the PF.
 +               */
 +              num_vf_qs = min(SH_VF_MAX_NIC_EQS,
 +                              res.max_rss_qs / (num_vfs + 1));
  
                /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
                 * interfaces per port. Provide RSS on VFs, only if number
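A worked example of the new split (userspace C; the SH_VF_MAX_NIC_EQS value below is assumed purely for illustration — the real constant comes from the driver headers):

    #include <stdio.h>

    #define SH_VF_MAX_NIC_EQS 16 /* assumed value, illustration only */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int max_rss_qs = 64, num_vfs = 7;

            /* Even split across the PF plus its VFs, capped at the per-VF
             * EQ count; any remainder queue pairs stay with the PF.
             */
            unsigned int num_vf_qs = min_u(SH_VF_MAX_NIC_EQS,
                                           max_rss_qs / (num_vfs + 1));
            printf("qpairs per VF: %u\n", num_vf_qs); /* prints 8 */
            return 0;
    }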
@@@ -3875,8 -3862,7 +3880,7 @@@ static int be_vfs_if_create(struct be_a
        int status;
  
        /* If a FW profile exists, then cap_flags are updated */
-       cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+       cap_flags = BE_VF_IF_EN_FLAGS;
  
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        }
                }
  
-               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-                                       BE_IF_FLAGS_BROADCAST |
-                                       BE_IF_FLAGS_MULTICAST |
-                                       BE_IF_FLAGS_PASS_L3L4_ERRORS);
+               /* PF should enable IF flags during proxy if_create call */
+               en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
@@@ -4100,7 -4084,6 +4102,7 @@@ static void be_setup_init(struct be_ada
        adapter->if_handle = -1;
        adapter->be3_native = false;
        adapter->if_flags = 0;
 +      adapter->phy_state = BE_UNKNOWN_PHY_STATE;
        if (be_physfn(adapter))
                adapter->cmd_privileges = MAX_PRIVILEGES;
        else
@@@ -4284,10 -4267,10 +4286,10 @@@ static void be_schedule_worker(struct b
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
  }
  
 -static void be_schedule_err_detection(struct be_adapter *adapter)
 +static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
  {
        schedule_delayed_work(&adapter->be_err_detection_work,
 -                            msecs_to_jiffies(1000));
 +                            msecs_to_jiffies(delay));
        adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
  }
  
        return status;
  }
  
 +static int be_if_create(struct be_adapter *adapter)
 +{
 +      u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
 +      u32 cap_flags = be_if_cap_flags(adapter);
 +      int status;
 +
 +      if (adapter->cfg_num_qs == 1)
 +              cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
 +
 +      en_flags &= cap_flags;
 +      /* will enable all the needed filter flags in be_open() */
 +      status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
 +                                &adapter->if_handle, 0);
 +
 +      return status;
 +}
 +
  int be_update_queues(struct be_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
                be_msix_disable(adapter);
  
        be_clear_queues(adapter);
 +      status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 +      if (status)
 +              return status;
  
        if (!msix_enabled(adapter)) {
                status = be_msix_enable(adapter);
                        return status;
        }
  
 +      status = be_if_create(adapter);
 +      if (status)
 +              return status;
 +
        status = be_setup_queues(adapter);
        if (status)
                return status;
@@@ -4438,6 -4397,7 +4440,6 @@@ static int be_func_init(struct be_adapt
  static int be_setup(struct be_adapter *adapter)
  {
        struct device *dev = &adapter->pdev->dev;
 -      u32 en_flags;
        int status;
  
        status = be_func_init(adapter);
                goto err;
  
        /* will enable all the needed filter flags in be_open() */
 -      en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
 -      en_flags = en_flags & be_if_cap_flags(adapter);
 -      status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
 -                                &adapter->if_handle, 0);
 +      status = be_if_create(adapter);
        if (status)
                goto err;
  
@@@ -4628,9 -4591,6 +4630,9 @@@ static int be_ndo_bridge_getlink(struc
  
        /* BE and Lancer chips support VEB mode only */
        if (BEx_chip(adapter) || lancer_chip(adapter)) {
 +              /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
 +              if (!pci_sriov_get_totalvfs(adapter->pdev))
 +                      return 0;
                hsw_mode = PORT_FWD_TYPE_VEB;
        } else {
                status = be_cmd_get_hsw_config(adapter, NULL, 0,
@@@ -4846,7 -4806,7 +4848,7 @@@ static void be_netdev_init(struct net_d
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
 -      if (be_multi_rxq(adapter))
 +      if (be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)
                netdev->hw_features |= NETIF_F_RXHASH;
  
        netdev->features |= netdev->hw_features |
  
        netdev->flags |= IFF_MULTICAST;
  
 -      netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 +      netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
  
        netdev->netdev_ops = &be_netdev_ops;
  
@@@ -4901,27 -4861,21 +4903,27 @@@ static int be_resume(struct be_adapter 
  
  static int be_err_recover(struct be_adapter *adapter)
  {
 -      struct device *dev = &adapter->pdev->dev;
        int status;
  
 +      /* Error recovery is supported only on Lancer as of now */
 +      if (!lancer_chip(adapter))
 +              return -EIO;
 +
 +      /* Wait for adapter to reach quiescent state before
 +       * destroying queues
 +       */
 +      status = be_fw_wait_ready(adapter);
 +      if (status)
 +              goto err;
 +
 +      be_cleanup(adapter);
 +
        status = be_resume(adapter);
        if (status)
                goto err;
  
 -      dev_info(dev, "Adapter recovery successful\n");
        return 0;
  err:
 -      if (be_physfn(adapter))
 -              dev_err(dev, "Adapter recovery failed\n");
 -      else
 -              dev_err(dev, "Re-trying adapter recovery\n");
 -
        return status;
  }
  
@@@ -4930,43 -4884,21 +4932,43 @@@ static void be_err_detection_task(struc
        struct be_adapter *adapter =
                                container_of(work, struct be_adapter,
                                             be_err_detection_work.work);
 -      int status = 0;
 +      struct device *dev = &adapter->pdev->dev;
 +      int recovery_status;
 +      int delay = ERR_DETECTION_DELAY;
  
        be_detect_error(adapter);
  
 -      if (be_check_error(adapter, BE_ERROR_HW)) {
 -              be_cleanup(adapter);
 -
 -              /* As of now error recovery support is in Lancer only */
 -              if (lancer_chip(adapter))
 -                      status = be_err_recover(adapter);
 +      if (be_check_error(adapter, BE_ERROR_HW))
 +              recovery_status = be_err_recover(adapter);
 +      else
 +              goto reschedule_task;
 +
 +      if (!recovery_status) {
 +              adapter->recovery_retries = 0;
 +              dev_info(dev, "Adapter recovery successful\n");
 +              goto reschedule_task;
 +      } else if (be_virtfn(adapter)) {
 +              /* For VFs, check every second whether the PF has
 +               * allocated resources.
 +               */
 +              dev_err(dev, "Re-trying adapter recovery\n");
 +              goto reschedule_task;
 +      } else if (adapter->recovery_retries++ <
 +                 MAX_ERR_RECOVERY_RETRY_COUNT) {
 +              /* If another error hits during recovery, the adapter takes
 +               * 30 seconds to come out of the error state. Retry error
 +               * recovery after that interval.
 +               */
 +              dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
 +              delay = ERR_RECOVERY_RETRY_DELAY;
 +              goto reschedule_task;
 +      } else {
 +              dev_err(dev, "Adapter recovery failed\n");
        }
  
 -      /* Always attempt recovery on VFs */
 -      if (!status || be_virtfn(adapter))
 -              be_schedule_err_detection(adapter);
 +      return;
 +reschedule_task:
 +      be_schedule_err_detection(adapter, delay);
  }
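The reworked task boils down to a small rescheduling policy. A standalone sketch (userspace C; all three constants are assumptions — the old code polled at a fixed 1000 ms, and the comment above implies a roughly 30 s retry window):

    #include <stdbool.h>
    #include <stdio.h>

    #define ERR_DETECTION_DELAY          1000  /* ms, assumed */
    #define ERR_RECOVERY_RETRY_DELAY     30000 /* ms, assumed */
    #define MAX_ERR_RECOVERY_RETRY_COUNT 3     /* assumed */

    /* Returns the next polling delay in ms, or -1 when a PF gives up. */
    static int next_delay(bool recovered, bool is_vf, int *retries)
    {
            if (recovered) {        /* success: reset and keep watching */
                    *retries = 0;
                    return ERR_DETECTION_DELAY;
            }
            if (is_vf)              /* VFs retry forever at the base rate */
                    return ERR_DETECTION_DELAY;
            if ((*retries)++ < MAX_ERR_RECOVERY_RETRY_COUNT)
                    return ERR_RECOVERY_RETRY_DELAY;
            return -1;              /* PF: recovery failed for good */
    }

    int main(void)
    {
            int retries = 0;

            printf("%d\n", next_delay(false, false, &retries)); /* 30000 */
            printf("%d\n", next_delay(true, false, &retries));  /* 1000 */
            return 0;
    }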
  
  static void be_log_sfp_info(struct be_adapter *adapter)
        status = be_cmd_query_sfp_info(adapter);
        if (!status) {
                dev_err(&adapter->pdev->dev,
 -                      "Unqualified SFP+ detected on %c from %s part no: %s",
 -                      adapter->port_name, adapter->phy.vendor_name,
 +                      "Port %c: %s Vendor: %s part no: %s",
 +                      adapter->port_name,
 +                      be_misconfig_evt_port_state[adapter->phy_state],
 +                      adapter->phy.vendor_name,
                        adapter->phy.vendor_pn);
        }
 -      adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
 +      adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
  }
  
  static void be_worker(struct work_struct *work)
        if (!skyhawk_chip(adapter))
                be_eqd_update(adapter, false);
  
 -      if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
 +      if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
                be_log_sfp_info(adapter);
  
  reschedule:
@@@ -5040,6 -4970,8 +5042,8 @@@ static void be_unmap_pci_bars(struct be
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
+       if (adapter->pcicfg && adapter->pcicfg_mapped)
+               pci_iounmap(adapter->pdev, adapter->pcicfg);
  }
  
  static int db_bar(struct be_adapter *adapter)
@@@ -5091,8 -5023,10 +5095,10 @@@ static int be_map_pci_bars(struct be_ad
                        if (!addr)
                                goto pci_map_err;
                        adapter->pcicfg = addr;
+                       adapter->pcicfg_mapped = true;
                } else {
                        adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+                       adapter->pcicfg_mapped = false;
                }
        }
  
@@@ -5364,7 -5298,7 +5370,7 @@@ static int be_probe(struct pci_dev *pde
  
        be_roce_dev_add(adapter);
  
 -      be_schedule_err_detection(adapter);
 +      be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
  
        /* On Die temperature not supported for VF. */
        if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@@ -5431,7 -5365,7 +5437,7 @@@ static int be_pci_resume(struct pci_de
        if (status)
                return status;
  
 -      be_schedule_err_detection(adapter);
 +      be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
  
        if (adapter->wol_en)
                be_setup_wol(adapter, false);
@@@ -5467,8 -5401,6 +5473,8 @@@ static pci_ers_result_t be_eeh_err_dete
  
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
  
 +      be_roce_dev_remove(adapter);
 +
        if (!be_check_error(adapter, BE_ERROR_EEH)) {
                be_set_error(adapter, BE_ERROR_EEH);
  
@@@ -5533,9 -5465,7 +5539,9 @@@ static void be_eeh_resume(struct pci_de
        if (status)
                goto err;
  
 -      be_schedule_err_detection(adapter);
 +      be_roce_dev_add(adapter);
 +
 +      be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
        return;
  err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
index a01e5a32b631299e9249dd5c914c092bd36f5842,b9ecf197ad117754245a290964d7cd55d9be5d4e..08c0415483002e27a2c3a7584fccb412fd5085f7
@@@ -1111,8 -1111,10 +1111,10 @@@ static void __gfar_detect_errata_85xx(s
  
        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
+       /* P2020/P2010 Rev 1; MPC8548 Rev 2 */
        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
-           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
+           ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
  }
  #endif
@@@ -2322,7 -2324,6 +2324,7 @@@ static int gfar_start_xmit(struct sk_bu
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
 +      skb_frag_t *frag;
        int i, rq = 0;
        int do_tstamp, do_csum, do_vlan;
        u32 bufaddr;
        txbdp = txbdp_start = tx_queue->cur_tx;
        lstatus = be32_to_cpu(txbdp->lstatus);
  
 -      /* Time stamp insertion requires one additional TxBD */
 -      if (unlikely(do_tstamp))
 -              txbdp_tstamp = txbdp = next_txbd(txbdp, base,
 -                                               tx_queue->tx_ring_size);
 -
 -      if (nr_frags == 0) {
 -              if (unlikely(do_tstamp)) {
 -                      u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
 -
 -                      lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 -                      txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
 -              } else {
 -                      lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 -              }
 -      } else {
 -              /* Place the fragment addresses and lengths into the TxBDs */
 -              for (i = 0; i < nr_frags; i++) {
 -                      unsigned int frag_len;
 -                      /* Point at the next BD, wrapping as needed */
 -                      txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 -
 -                      frag_len = skb_shinfo(skb)->frags[i].size;
 -
 -                      lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
 -                                BD_LFLAG(TXBD_READY);
 -
 -                      /* Handle the last BD specially */
 -                      if (i == nr_frags - 1)
 -                              lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 -
 -                      bufaddr = skb_frag_dma_map(priv->dev,
 -                                                 &skb_shinfo(skb)->frags[i],
 -                                                 0,
 -                                                 frag_len,
 -                                                 DMA_TO_DEVICE);
 -                      if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
 -                              goto dma_map_err;
 -
 -                      /* set the TxBD length and buffer pointer */
 -                      txbdp->bufPtr = cpu_to_be32(bufaddr);
 -                      txbdp->lstatus = cpu_to_be32(lstatus);
 -              }
 -
 -              lstatus = be32_to_cpu(txbdp_start->lstatus);
 -      }
 -
        /* Add TxPAL between FCB and frame if required */
        if (unlikely(do_tstamp)) {
                skb_push(skb, GMAC_TXPAL_LEN);
        if (do_vlan)
                gfar_tx_vlan(skb, fcb);
  
 -      /* Setup tx hardware time stamping if requested */
 -      if (unlikely(do_tstamp)) {
 -              skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 -              fcb->ptp = 1;
 -      }
 -
        bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
                                 DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
  
        txbdp_start->bufPtr = cpu_to_be32(bufaddr);
  
 +      /* Time stamp insertion requires one additional TxBD */
 +      if (unlikely(do_tstamp))
 +              txbdp_tstamp = txbdp = next_txbd(txbdp, base,
 +                                               tx_queue->tx_ring_size);
 +
 +      if (likely(!nr_frags)) {
 +              lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 +      } else {
 +              u32 lstatus_start = lstatus;
 +
 +              /* Place the fragment addresses and lengths into the TxBDs */
 +              frag = &skb_shinfo(skb)->frags[0];
 +              for (i = 0; i < nr_frags; i++, frag++) {
 +                      unsigned int size;
 +
 +                      /* Point at the next BD, wrapping as needed */
 +                      txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 +
 +                      size = skb_frag_size(frag);
 +
 +                      lstatus = be32_to_cpu(txbdp->lstatus) | size |
 +                                BD_LFLAG(TXBD_READY);
 +
 +                      /* Handle the last BD specially */
 +                      if (i == nr_frags - 1)
 +                              lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 +
 +                      bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
 +                                                 size, DMA_TO_DEVICE);
 +                      if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
 +                              goto dma_map_err;
 +
 +                      /* set the TxBD length and buffer pointer */
 +                      txbdp->bufPtr = cpu_to_be32(bufaddr);
 +                      txbdp->lstatus = cpu_to_be32(lstatus);
 +              }
 +
 +              lstatus = lstatus_start;
 +      }
 +
        /* If time stamping is requested one additional TxBD must be set up. The
         * first TxBD points to the FCB and must have a data length of
         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
  
                bufaddr = be32_to_cpu(txbdp_start->bufPtr);
                bufaddr += fcb_len;
 +
                lstatus_ts |= BD_LFLAG(TXBD_READY) |
                              (skb_headlen(skb) - fcb_len);
 +              if (!nr_frags)
 +                      lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
  
                txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
                txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 +
 +              /* Setup tx hardware time stamping */
 +              skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 +              fcb->ptp = 1;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
        }
@@@ -2706,7 -2712,7 +2708,7 @@@ static void gfar_clean_tx_ring(struct g
                                          ~0x7UL);
  
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 -                      shhwtstamps.hwtstamp = ns_to_ktime(*ns);
 +                      shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
                        skb_tstamp_tx(skb, &shhwtstamps);
                        gfar_clear_txbd_status(bdp);
@@@ -3035,7 -3041,7 +3037,7 @@@ static void gfar_process_frame(struct n
                u64 *ns = (u64 *) skb->data;
  
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 -              shhwtstamps->hwtstamp = ns_to_ktime(*ns);
 +              shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
        }
  
        if (priv->padding)
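Both ns_to_ktime() fixes in this file hinge on byte order: the eTSEC writes the 64-bit timestamp big-endian, so it must be swapped before use on little-endian hosts. A minimal illustration (userspace C; the helper is a stand-in for the kernel's be64_to_cpu() and assumes a little-endian build):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for be64_to_cpu(); a big-endian host would return x as-is. */
    static uint64_t be64_to_cpu(uint64_t x)
    {
            return __builtin_bswap64(x); /* assumes little-endian build */
    }

    int main(void)
    {
            /* The timestamp as the hardware lays it out: big-endian. */
            uint8_t raw[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };
            uint64_t ns;

            memcpy(&ns, raw, sizeof(ns));
            /* Read naively on little-endian this would come out as
             * 0x3412000000000000 instead of 0x1234.
             */
            printf("ns = 0x%llx\n", (unsigned long long)be64_to_cpu(ns));
            return 0;
    }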
index 16b26d17c54c2d0374112d293ff0f0377f3ecdef,21e2c09602716351a21dc2762f1631a298bf05eb..b4b258c8ca47d47f804fc85adb665e9d08fa48df
@@@ -40,7 -40,6 +40,7 @@@
  #include <net/ip.h>
  #include <net/busy_poll.h>
  #include <net/vxlan.h>
 +#include <net/devlink.h>
  
  #include <linux/mlx4/driver.h>
  #include <linux/mlx4/device.h>
@@@ -70,15 -69,6 +70,15 @@@ int mlx4_en_setup_tc(struct net_device 
        return 0;
  }
  
 +static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 +                            struct tc_to_netdev *tc)
 +{
 +      if (tc->type != TC_SETUP_MQPRIO)
 +              return -EINVAL;
 +
 +      return mlx4_en_setup_tc(dev, tc->tc);
 +}
 +
  #ifdef CONFIG_RFS_ACCEL
  
  struct mlx4_en_filter {
@@@ -2034,11 -2024,8 +2034,11 @@@ void mlx4_en_destroy_netdev(struct net_
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
  
        /* Unregister device - this will close the port if it was up */
 -      if (priv->registered)
 +      if (priv->registered) {
 +              devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
 +                                                            priv->port));
                unregister_netdev(dev);
 +      }
  
        if (priv->allocated)
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@@ -2258,7 -2245,7 +2258,7 @@@ static int mlx4_en_set_vf_mac(struct ne
        struct mlx4_en_dev *mdev = en_priv->mdev;
        u64 mac_u64 = mlx4_mac_to_u64(mac);
  
-       if (!is_valid_ether_addr(mac))
+       if (is_multicast_ether_addr(mac))
                return -EINVAL;
  
        return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
@@@ -2475,7 -2462,7 +2475,7 @@@ static const struct net_device_ops mlx4
  #endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
 -      .ndo_setup_tc           = mlx4_en_setup_tc,
 +      .ndo_setup_tc           = __mlx4_en_setup_tc,
  #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
  #endif
@@@ -2513,7 -2500,7 +2513,7 @@@ static const struct net_device_ops mlx4
  #endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
 -      .ndo_setup_tc           = mlx4_en_setup_tc,
 +      .ndo_setup_tc           = __mlx4_en_setup_tc,
  #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
  #endif
@@@ -3055,8 -3042,6 +3055,8 @@@ int mlx4_en_init_netdev(struct mlx4_en_
        }
  
        priv->registered = 1;
 +      devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
 +                                dev);
  
        return 0;
  
index b8a51515e73cf5aea030b79848d25964d06472f5,f8674ae62752d53bc768c16ac9b2de3053e183eb..503ec23e84cce68e6e47613ea9dc495dae9942cd
@@@ -42,7 -42,6 +42,7 @@@
  #include <linux/io-mapping.h>
  #include <linux/delay.h>
  #include <linux/kmod.h>
 +#include <net/devlink.h>
  
  #include <linux/mlx4/device.h>
  #include <linux/mlx4/doorbell.h>
@@@ -1082,20 -1081,36 +1082,20 @@@ static ssize_t show_port_type(struct de
        return strlen(buf);
  }
  
 -static ssize_t set_port_type(struct device *dev,
 -                           struct device_attribute *attr,
 -                           const char *buf, size_t count)
 +static int __set_port_type(struct mlx4_port_info *info,
 +                         enum mlx4_port_type port_type)
  {
 -      struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
 -                                                 port_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        enum mlx4_port_type types[MLX4_MAX_PORTS];
        enum mlx4_port_type new_types[MLX4_MAX_PORTS];
 -      static DEFINE_MUTEX(set_port_type_mutex);
        int i;
        int err = 0;
  
 -      mutex_lock(&set_port_type_mutex);
 -
 -      if (!strcmp(buf, "ib\n"))
 -              info->tmp_type = MLX4_PORT_TYPE_IB;
 -      else if (!strcmp(buf, "eth\n"))
 -              info->tmp_type = MLX4_PORT_TYPE_ETH;
 -      else if (!strcmp(buf, "auto\n"))
 -              info->tmp_type = MLX4_PORT_TYPE_AUTO;
 -      else {
 -              mlx4_err(mdev, "%s is not supported port type\n", buf);
 -              err = -EINVAL;
 -              goto err_out;
 -      }
 -
        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
 +      info->tmp_type = port_type;
 +
        /* Possible type is always the one that was delivered */
        mdev->caps.possible_type[info->port] = info->tmp_type;
  
  out:
        mlx4_start_sense(mdev);
        mutex_unlock(&priv->port_mutex);
 +
 +      return err;
 +}
 +
 +static ssize_t set_port_type(struct device *dev,
 +                           struct device_attribute *attr,
 +                           const char *buf, size_t count)
 +{
 +      struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
 +                                                 port_attr);
 +      struct mlx4_dev *mdev = info->dev;
 +      enum mlx4_port_type port_type;
 +      static DEFINE_MUTEX(set_port_type_mutex);
 +      int err;
 +
 +      mutex_lock(&set_port_type_mutex);
 +
 +      if (!strcmp(buf, "ib\n")) {
 +              port_type = MLX4_PORT_TYPE_IB;
 +      } else if (!strcmp(buf, "eth\n")) {
 +              port_type = MLX4_PORT_TYPE_ETH;
 +      } else if (!strcmp(buf, "auto\n")) {
 +              port_type = MLX4_PORT_TYPE_AUTO;
 +      } else {
 +              mlx4_err(mdev, "%s is not a supported port type\n", buf);
 +              err = -EINVAL;
 +              goto err_out;
 +      }
 +
 +      err = __set_port_type(info, port_type);
 +
  err_out:
        mutex_unlock(&set_port_type_mutex);
  
@@@ -1272,6 -1256,7 +1272,7 @@@ err_set_port
  static int mlx4_mf_bond(struct mlx4_dev *dev)
  {
        int err = 0;
+       int nvfs;
        struct mlx4_slaves_pport slaves_port1;
        struct mlx4_slaves_pport slaves_port2;
        DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
                return -EINVAL;
        }
  
+       /* The number of virtual functions is the total number of functions
+        * minus one physical function per port.
+        */
+       nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
+               bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
        /* limit on maximum allowed VFs */
-       if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
-           bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
-           MAX_MF_BOND_ALLOWED_SLAVES)
+       if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
+               mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
+                         nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
                return -EINVAL;
+       }
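bitmap_weight() is a population count over the slave bitmap, so the new bound is easy to sanity-check in isolation (userspace C; the limit value is an assumption for illustration):

    #include <stdio.h>

    #define MAX_MF_BOND_ALLOWED_SLAVES 63 /* assumed value */

    int main(void)
    {
            /* One PF (bit 0) plus four VFs per port: five set bits each. */
            unsigned long long port1_slaves = 0x1f, port2_slaves = 0x1f;

            /* Total functions on both ports minus the two PFs. */
            int nvfs = __builtin_popcountll(port1_slaves) +
                       __builtin_popcountll(port2_slaves) - 2;

            printf("nvfs = %d (limit %d)\n", nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
            return 0;
    }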
  
        if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
                mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
@@@ -2897,13 -2889,8 +2905,13 @@@ no_msi
  
  static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
  {
 +      struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
 -      int err = 0;
 +      int err;
 +
 +      err = devlink_port_register(devlink, &info->devlink_port, port);
 +      if (err)
 +              return err;
  
        info->dev = dev;
        info->port = port;
        err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
        if (err) {
                mlx4_err(dev, "Failed to create file for port %d\n", port);
 +              devlink_port_unregister(&info->devlink_port);
                info->port = -1;
        }
  
@@@ -3700,54 -3686,23 +3708,54 @@@ err_disable_pdev
        return err;
  }
  
 +static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
 +                                    enum devlink_port_type port_type)
 +{
 +      struct mlx4_port_info *info = container_of(devlink_port,
 +                                                 struct mlx4_port_info,
 +                                                 devlink_port);
 +      enum mlx4_port_type mlx4_port_type;
 +
 +      switch (port_type) {
 +      case DEVLINK_PORT_TYPE_AUTO:
 +              mlx4_port_type = MLX4_PORT_TYPE_AUTO;
 +              break;
 +      case DEVLINK_PORT_TYPE_ETH:
 +              mlx4_port_type = MLX4_PORT_TYPE_ETH;
 +              break;
 +      case DEVLINK_PORT_TYPE_IB:
 +              mlx4_port_type = MLX4_PORT_TYPE_IB;
 +              break;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +
 +      return __set_port_type(info, mlx4_port_type);
 +}
 +
 +static const struct devlink_ops mlx4_devlink_ops = {
 +      .port_type_set  = mlx4_devlink_port_type_set,
 +};
 +
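With the .port_type_set hook wired up, the port type becomes switchable through the devlink tool rather than only via the sysfs attribute. Assuming the standard iproute2 devlink syntax, a command along the lines of

    devlink port set pci/0000:03:00.0/1 type eth

lands in mlx4_devlink_port_type_set() above, which translates the devlink type into the mlx4 one and funnels into the same __set_port_type() path the sysfs write now uses.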
  static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
  {
 +      struct devlink *devlink;
        struct mlx4_priv *priv;
        struct mlx4_dev *dev;
        int ret;
  
        printk_once(KERN_INFO "%s", mlx4_version);
  
 -      priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 -      if (!priv)
 +      devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
 +      if (!devlink)
                return -ENOMEM;
 +      priv = devlink_priv(devlink);
  
        dev       = &priv->dev;
        dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
        if (!dev->persist) {
 -              kfree(priv);
 -              return -ENOMEM;
 +              ret = -ENOMEM;
 +              goto err_devlink_free;
        }
        dev->persist->pdev = pdev;
        dev->persist->dev = dev;
        mutex_init(&dev->persist->device_state_mutex);
        mutex_init(&dev->persist->interface_state_mutex);
  
 +      ret = devlink_register(devlink, &pdev->dev);
 +      if (ret)
 +              goto err_persist_free;
 +
        ret =  __mlx4_init_one(pdev, id->driver_data, priv);
 -      if (ret) {
 -              kfree(dev->persist);
 -              kfree(priv);
 -      } else {
 -              pci_save_state(pdev);
 -      }
 +      if (ret)
 +              goto err_devlink_unregister;
  
 +      pci_save_state(pdev);
 +      return 0;
 +
 +err_devlink_unregister:
 +      devlink_unregister(devlink);
 +err_persist_free:
 +      kfree(dev->persist);
 +err_devlink_free:
 +      devlink_free(devlink);
        return ret;
  }
  
@@@ -3873,7 -3819,6 +3881,7 @@@ static void mlx4_remove_one(struct pci_
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
 +      struct devlink *devlink = priv_to_devlink(priv);
        int active_vfs = 0;
  
        mutex_lock(&persist->interface_state_mutex);
  
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 +      devlink_unregister(devlink);
        kfree(dev->persist);
 -      kfree(priv);
 +      devlink_free(devlink);
        pci_set_drvdata(pdev, NULL);
  }
  
index 9c0e80e64b43e701ee331ed9a256d8ee6a086e44,5b1753233c5dd8c30c1dd110a952c6d9c4360e39..dbc2fb89e0673c41a485d5f58787eea953cf3f4e
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
@@@ -29,8 -29,6 +29,8 @@@
   * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   * SOFTWARE.
   */
 +#ifndef __MLX5_EN_H__
 +#define __MLX5_EN_H__
  
  #include <linux/if_vlan.h>
  #include <linux/etherdevice.h>
@@@ -40,7 -38,6 +40,7 @@@
  #include <linux/mlx5/driver.h>
  #include <linux/mlx5/qp.h>
  #include <linux/mlx5/cq.h>
 +#include <linux/mlx5/port.h>
  #include <linux/mlx5/vport.h>
  #include <linux/mlx5/transobj.h>
  #include "wq.h"
  
  #define MLX5E_NUM_MAIN_GROUPS 9
  
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 +#define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 +#endif
 +
  static const char vport_strings[][ETH_GSTRING_LEN] = {
        /* vport statistics */
        "rx_packets",
        /* SW counters */
        "tso_packets",
        "tso_bytes",
 +      "tso_inner_packets",
 +      "tso_inner_bytes",
        "lro_packets",
        "lro_bytes",
        "rx_csum_good",
        "rx_csum_none",
        "rx_csum_sw",
        "tx_csum_offload",
 +      "tx_csum_inner",
        "tx_queue_stopped",
        "tx_queue_wake",
        "tx_queue_dropped",
@@@ -144,21 -133,18 +144,21 @@@ struct mlx5e_vport_stats 
        /* SW counters */
        u64 tso_packets;
        u64 tso_bytes;
 +      u64 tso_inner_packets;
 +      u64 tso_inner_bytes;
        u64 lro_packets;
        u64 lro_bytes;
        u64 rx_csum_good;
        u64 rx_csum_none;
        u64 rx_csum_sw;
        u64 tx_csum_offload;
 +      u64 tx_csum_inner;
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
        u64 rx_wqe_err;
  
 -#define NUM_VPORT_COUNTERS     32
 +#define NUM_VPORT_COUNTERS     35
  };
  
  static const char pport_strings[][ETH_GSTRING_LEN] = {
@@@ -237,6 -223,7 +237,7 @@@ struct mlx5e_pport_stats 
  
  static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
+       "bytes",
        "csum_none",
        "csum_sw",
        "lro_packets",
  
  struct mlx5e_rq_stats {
        u64 packets;
+       u64 bytes;
        u64 csum_none;
        u64 csum_sw;
        u64 lro_packets;
        u64 lro_bytes;
        u64 wqe_err;
- #define NUM_RQ_STATS 6
+ #define NUM_RQ_STATS 7
  };
  
  static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
+       "bytes",
        "tso_packets",
        "tso_bytes",
 +      "tso_inner_packets",
 +      "tso_inner_bytes",
 +      "csum_offload_inner",
 +      "nop",
        "csum_offload_none",
        "stopped",
        "wake",
        "dropped",
 -      "nop"
  };
  
  struct mlx5e_sq_stats {
 +      /* commonly accessed in data path */
        u64 packets;
+       u64 bytes;
        u64 tso_packets;
        u64 tso_bytes;
 +      u64 tso_inner_packets;
 +      u64 tso_inner_bytes;
 +      u64 csum_offload_inner;
 +      u64 nop;
 +      /* less likely accessed in data path */
        u64 csum_offload_none;
        u64 stopped;
        u64 wake;
        u64 dropped;
- #define NUM_SQ_STATS 11
 -      u64 nop;
 -#define NUM_SQ_STATS 9
++#define NUM_SQ_STATS 12
  };
  
  struct mlx5e_stats {
@@@ -294,6 -276,7 +298,6 @@@ struct mlx5e_params 
        u8  log_sq_size;
        u8  log_rq_size;
        u16 num_channels;
 -      u8  default_vlan_prio;
        u8  num_tc;
        u16 rx_cq_moderation_usec;
        u16 rx_cq_moderation_pkts;
        u8  rss_hfunc;
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +      struct ieee_ets ets;
 +#endif
  };
  
  struct mlx5e_tstamp {
@@@ -328,14 -308,9 +332,9 @@@ enum 
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
  };
  
- enum cq_flags {
-       MLX5E_CQ_HAS_CQES = 1,
- };
  struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq           wq;
-       unsigned long              flags;
  
        /* data path - accessed per napi poll */
        struct napi_struct        *napi;
@@@ -388,7 -363,6 +387,7 @@@ struct mlx5e_sq_dma 
  
  enum {
        MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
 +      MLX5E_SQ_STATE_BF_ENABLE,
  };
  
  struct mlx5e_sq {
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        void __iomem              *uar_map;
 -      void __iomem              *uar_bf_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        u16                        bf_buf_size;
@@@ -476,6 -451,8 +475,8 @@@ enum mlx5e_traffic_types 
        MLX5E_NUM_TT,
  };
  
+ #define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
  enum mlx5e_rqt_ix {
        MLX5E_INDIRECTION_RQT,
        MLX5E_SINGLE_RQ_RQT,
@@@ -515,11 -492,6 +516,11 @@@ struct mlx5e_vlan_db 
        bool          filter_disabled;
  };
  
 +struct mlx5e_vxlan_db {
 +      spinlock_t                      lock; /* protect vxlan table */
 +      struct radix_tree_root          tree;
 +};
 +
  struct mlx5e_flow_table {
        int num_groups;
        struct mlx5_flow_table          *t;
@@@ -534,6 -506,7 +535,6 @@@ struct mlx5e_flow_tables 
  
  struct mlx5e_priv {
        /* priv data path fields - start */
 -      int                        default_vlan_prio;
        struct mlx5e_sq            **txq_to_sq_map;
        int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
        /* priv data path fields - end */
        struct mlx5e_flow_tables   fts;
        struct mlx5e_eth_addr_db   eth_addr;
        struct mlx5e_vlan_db       vlan;
 +      struct mlx5e_vxlan_db      vxlan;
  
        struct mlx5e_params        params;
 -      spinlock_t                 async_events_spinlock; /* sync hw events */
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
@@@ -646,9 -619,12 +647,12 @@@ void mlx5e_enable_vlan_filter(struct ml
  void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
  
  int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
  
  int mlx5e_open_locked(struct net_device *netdev);
  int mlx5e_close_locked(struct net_device *netdev);
+ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+                                  int num_channels);
  
  static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
                                      struct mlx5e_tx_wqe *wqe, int bf_sz)
         * doorbell
         */
        wmb();
 -
 -      if (bf_sz) {
 -              __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
 -
 -              /* flush the write-combining mapped buffer */
 -              wmb();
 -
 -      } else {
 +      if (bf_sz)
 +              __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
 +      else
                mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
 -      }
 +      /* flush the write-combining mapped buffer */
 +      wmb();
  
        sq->bf_offset ^= sq->bf_buf_size;
  }
@@@ -689,11 -669,4 +693,11 @@@ static inline int mlx5e_get_max_num_cha
  }
  
  extern const struct ethtool_ops mlx5e_ethtool_ops;
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 +int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 +#endif
 +
  u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 +
 +#endif /* __MLX5_EN_H__ */
index 0959656404b3df249f41dae77901ce3bf7da7577,5abeb00fceb8b0876d17f4aab813054d55414cc8..68834b715f6c114c89d6df339cb9cc81a72b8f75
@@@ -211,14 -211,13 +211,14 @@@ static void mlx5e_get_strings(struct ne
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                        "rx%d_%s", i, rq_stats_strings[j]);
  
 -              for (i = 0; i < priv->params.num_channels; i++)
 -                      for (tc = 0; tc < priv->params.num_tc; tc++)
 +              for (tc = 0; tc < priv->params.num_tc; tc++)
 +                      for (i = 0; i < priv->params.num_channels; i++)
                                for (j = 0; j < NUM_SQ_STATS; j++)
                                        sprintf(data +
 -                                              (idx++) * ETH_GSTRING_LEN,
 -                                              "tx%d_%d_%s", i, tc,
 -                                              sq_stats_strings[j]);
 +                                            (idx++) * ETH_GSTRING_LEN,
 +                                            "tx%d_%s",
 +                                            priv->channeltc_to_txq_map[i][tc],
 +                                            sq_stats_strings[j]);
                break;
        }
  }
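The ethtool stat names now carry the real txq index instead of a channel/tc pair. Assuming channeltc_to_txq_map follows the layout implied by the netdev_set_tc_queue(netdev, tc, nch, tc * nch) call elsewhere in this merge (txq = channel + tc * num_channels), the naming works out as:

    #include <stdio.h>

    int main(void)
    {
            int num_channels = 4, num_tc = 2;

            /* Assumed map layout: the queues of each traffic class are
             * contiguous, so "tx5_..." below is channel 1 of TC 1.
             */
            for (int tc = 0; tc < num_tc; tc++)
                    for (int ch = 0; ch < num_channels; ch++)
                            printf("tx%d_packets <- channel %d, tc %d\n",
                                   ch + tc * num_channels, ch, tc);
            return 0;
    }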
@@@ -250,8 -249,8 +250,8 @@@ static void mlx5e_get_ethtool_stats(str
                                                &priv->state) ? 0 :
                                       ((u64 *)&priv->channel[i]->rq.stats)[j];
  
 -      for (i = 0; i < priv->params.num_channels; i++)
 -              for (tc = 0; tc < priv->params.num_tc; tc++)
 +      for (tc = 0; tc < priv->params.num_tc; tc++)
 +              for (i = 0; i < priv->params.num_channels; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                data[idx++] = !test_bit(MLX5E_STATE_OPENED,
                                                        &priv->state) ? 0 :
@@@ -386,6 -385,8 +386,8 @@@ static int mlx5e_set_channels(struct ne
                mlx5e_close_locked(dev);
  
        priv->params.num_channels = count;
+       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, count);
  
        if (was_opened)
                err = mlx5e_open_locked(dev);
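mlx5e_build_default_indir_rqt() presumably spreads the table slots round-robin over the active channels, which is what keeps every slot valid after a channel-count change. A sketch of that assumption (userspace C; the table size is assumed):

    #include <stdio.h>

    #define MLX5E_INDIR_RQT_SIZE 128 /* assumed table size */

    /* Assumed behavior: slot i steers to channel (i % num_channels), so
     * shrinking the channel count never leaves a slot pointing at a ring
     * that no longer exists.
     */
    static void build_default_indir_rqt(unsigned int *rqt, int len,
                                        int num_channels)
    {
            for (int i = 0; i < len; i++)
                    rqt[i] = i % num_channels;
    }

    int main(void)
    {
            unsigned int rqt[MLX5E_INDIR_RQT_SIZE];

            build_default_indir_rqt(rqt, MLX5E_INDIR_RQT_SIZE, 6);
            printf("slot 7 -> channel %u\n", rqt[7]); /* prints 1 */
            return 0;
    }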
@@@ -400,9 -401,6 +402,9 @@@ static int mlx5e_get_coalesce(struct ne
  {
        struct mlx5e_priv *priv = netdev_priv(netdev);
  
 +      if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
 +              return -ENOTSUPP;
 +
        coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
        coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
        coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
@@@ -420,18 -418,11 +422,18 @@@ static int mlx5e_set_coalesce(struct ne
        int tc;
        int i;
  
 +      if (!MLX5_CAP_GEN(mdev, cq_moderation))
 +              return -ENOTSUPP;
 +
 +      mutex_lock(&priv->state_lock);
        priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
        priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
        priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
        priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
  
 +      if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 +              goto out;
 +
        for (i = 0; i < priv->params.num_channels; ++i) {
                c = priv->channel[i];
  
                                               coal->rx_max_coalesced_frames);
        }
  
 +out:
 +      mutex_unlock(&priv->state_lock);
        return 0;
  }
  
@@@ -716,18 -705,36 +718,36 @@@ static int mlx5e_get_rxfh(struct net_de
        return 0;
  }
  
+ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+ {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+       int i;
+       MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+       mlx5e_build_tir_ctx_hash(tirc, priv);
+       for (i = 0; i < MLX5E_NUM_TT; i++)
+               if (IS_HASHING_TT(i))
+                       mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+ }
  static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                          const u8 *key, const u8 hfunc)
  {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       bool close_open;
-       int err = 0;
+       int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       void *in;
  
        if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
            (hfunc != ETH_RSS_HASH_XOR) &&
            (hfunc != ETH_RSS_HASH_TOP))
                return -EINVAL;
  
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
        mutex_lock(&priv->state_lock);
  
        if (indir) {
                mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        }
  
-       close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
-                    test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (close_open)
-               mlx5e_close_locked(dev);
        if (key)
                memcpy(priv->params.toeplitz_hash_key, key,
                       sizeof(priv->params.toeplitz_hash_key));
        if (hfunc != ETH_RSS_HASH_NO_CHANGE)
                priv->params.rss_hfunc = hfunc;
  
-       if (close_open)
-               err = mlx5e_open_locked(priv->netdev);
+       mlx5e_modify_tirs_hash(priv, in, inlen);
  
        mutex_unlock(&priv->state_lock);
  
-       return err;
+       kvfree(in);
+       return 0;
  }
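The payoff of modifying the TIRs in place is that retuning RSS no longer bounces the interface: assuming the standard ethtool syntax, a hash change such as

    ethtool -X eth0 hfunc xor

now completes without the close/open cycle the old close_open path forced on every key or hash-function update.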
  
  static int mlx5e_get_rxnfc(struct net_device *netdev,
@@@ -897,129 -900,6 +913,129 @@@ static int mlx5e_get_ts_info(struct net
        return 0;
  }
  
 +static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
 +{
 +      __u32 ret = 0;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_g))
 +              ret |= WAKE_MAGIC;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_s))
 +              ret |= WAKE_MAGICSECURE;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_a))
 +              ret |= WAKE_ARP;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_b))
 +              ret |= WAKE_BCAST;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_m))
 +              ret |= WAKE_MCAST;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_u))
 +              ret |= WAKE_UCAST;
 +
 +      if (MLX5_CAP_GEN(mdev, wol_p))
 +              ret |= WAKE_PHY;
 +
 +      return ret;
 +}
 +
 +static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode)
 +{
 +      __u32 ret = 0;
 +
 +      if (mode & MLX5_WOL_MAGIC)
 +              ret |= WAKE_MAGIC;
 +
 +      if (mode & MLX5_WOL_SECURED_MAGIC)
 +              ret |= WAKE_MAGICSECURE;
 +
 +      if (mode & MLX5_WOL_ARP)
 +              ret |= WAKE_ARP;
 +
 +      if (mode & MLX5_WOL_BROADCAST)
 +              ret |= WAKE_BCAST;
 +
 +      if (mode & MLX5_WOL_MULTICAST)
 +              ret |= WAKE_MCAST;
 +
 +      if (mode & MLX5_WOL_UNICAST)
 +              ret |= WAKE_UCAST;
 +
 +      if (mode & MLX5_WOL_PHY_ACTIVITY)
 +              ret |= WAKE_PHY;
 +
 +      return ret;
 +}
 +
 +static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode)
 +{
 +      u8 ret = 0;
 +
 +      if (mode & WAKE_MAGIC)
 +              ret |= MLX5_WOL_MAGIC;
 +
 +      if (mode & WAKE_MAGICSECURE)
 +              ret |= MLX5_WOL_SECURED_MAGIC;
 +
 +      if (mode & WAKE_ARP)
 +              ret |= MLX5_WOL_ARP;
 +
 +      if (mode & WAKE_BCAST)
 +              ret |= MLX5_WOL_BROADCAST;
 +
 +      if (mode & WAKE_MCAST)
 +              ret |= MLX5_WOL_MULTICAST;
 +
 +      if (mode & WAKE_UCAST)
 +              ret |= MLX5_WOL_UNICAST;
 +
 +      if (mode & WAKE_PHY)
 +              ret |= MLX5_WOL_PHY_ACTIVITY;
 +
 +      return ret;
 +}
 +
 +static void mlx5e_get_wol(struct net_device *netdev,
 +                        struct ethtool_wolinfo *wol)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +      struct mlx5_core_dev *mdev = priv->mdev;
 +      u8 mlx5_wol_mode;
 +      int err;
 +
 +      memset(wol, 0, sizeof(*wol));
 +
 +      wol->supported = mlx5e_get_wol_supported(mdev);
 +      if (!wol->supported)
 +              return;
 +
 +      err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
 +      if (err)
 +              return;
 +
 +      wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
 +}
 +
 +static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +      struct mlx5_core_dev *mdev = priv->mdev;
 +      __u32 wol_supported = mlx5e_get_wol_supported(mdev);
 +      u32 mlx5_wol_mode;
 +
 +      if (!wol_supported)
 +              return -ENOTSUPP;
 +
 +      if (wol->wolopts & ~wol_supported)
 +              return -EINVAL;
 +
 +      mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts);
 +
 +      return mlx5_set_port_wol(mdev, mlx5_wol_mode);
 +}
 +
  const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_pauseparam    = mlx5e_get_pauseparam,
        .set_pauseparam    = mlx5e_set_pauseparam,
        .get_ts_info       = mlx5e_get_ts_info,
 +      .get_wol           = mlx5e_get_wol,
 +      .set_wol           = mlx5e_set_wol,
  };
index 5063c0e0f8acb340dd259d693324769a8ed9dc9c,402994bf7e167d1ae23a8943789f712f82862f46..19e5daeaa61dfc7fb19d90d6587b900e1b83f6ad
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
   */
  
  #include <linux/mlx5/fs.h>
 +#include <net/vxlan.h>
  #include "en.h"
  #include "eswitch.h"
 +#include "vxlan.h"
  
  struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
@@@ -143,14 -141,15 +143,18 @@@ void mlx5e_update_stats(struct mlx5e_pr
                return;
  
        /* Collect first the SW counters and then HW for consistency */
+       s->rx_packets           = 0;
+       s->rx_bytes             = 0;
+       s->tx_packets           = 0;
+       s->tx_bytes             = 0;
        s->tso_packets          = 0;
        s->tso_bytes            = 0;
 +      s->tso_inner_packets    = 0;
 +      s->tso_inner_bytes      = 0;
        s->tx_queue_stopped     = 0;
        s->tx_queue_wake        = 0;
        s->tx_queue_dropped     = 0;
 +      s->tx_csum_inner        = 0;
        tx_offload_none         = 0;
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;
  
+               s->rx_packets   += rq_stats->packets;
+               s->rx_bytes     += rq_stats->bytes;
                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
  
+                       s->tx_packets           += sq_stats->packets;
+                       s->tx_bytes             += sq_stats->bytes;
                        s->tso_packets          += sq_stats->tso_packets;
                        s->tso_bytes            += sq_stats->tso_bytes;
 +                      s->tso_inner_packets    += sq_stats->tso_inner_packets;
 +                      s->tso_inner_bytes      += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
 +                      s->tx_csum_inner        += sq_stats->csum_offload_inner;
                        tx_offload_none         += sq_stats->csum_offload_none;
                }
        }
        s->tx_broadcast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
  
-       s->rx_packets =
-               s->rx_unicast_packets +
-               s->rx_multicast_packets +
-               s->rx_broadcast_packets;
-       s->rx_bytes =
-               s->rx_unicast_bytes +
-               s->rx_multicast_bytes +
-               s->rx_broadcast_bytes;
-       s->tx_packets =
-               s->tx_unicast_packets +
-               s->tx_multicast_packets +
-               s->tx_broadcast_packets;
-       s->tx_bytes =
-               s->tx_unicast_bytes +
-               s->tx_multicast_bytes +
-               s->tx_broadcast_bytes;
        /* Update calculated offload counters */
 -      s->tx_csum_offload = s->tx_packets - tx_offload_none;
 +      s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
                               s->rx_csum_sw;
  
@@@ -275,14 -258,9 +266,14 @@@ static void mlx5e_update_stats_work(str
        mutex_unlock(&priv->state_lock);
  }
  
 -static void __mlx5e_async_event(struct mlx5e_priv *priv,
 -                              enum mlx5_dev_event event)
 +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 +                            enum mlx5_dev_event event, unsigned long param)
  {
 +      struct mlx5e_priv *priv = vpriv;
 +
 +      if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
 +              return;
 +
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
        }
  }
  
 -static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 -                            enum mlx5_dev_event event, unsigned long param)
 -{
 -      struct mlx5e_priv *priv = vpriv;
 -
 -      spin_lock(&priv->async_events_spinlock);
 -      if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
 -              __mlx5e_async_event(priv, event);
 -      spin_unlock(&priv->async_events_spinlock);
 -}
 -
  static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
  {
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
  
  static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
  {
 -      spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
 -      spin_unlock_irq(&priv->async_events_spinlock);
 +      synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
  }
  
  #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
@@@ -548,7 -538,7 +539,7 @@@ static int mlx5e_create_sq(struct mlx5e
        int txq_ix;
        int err;
  
 -      err = mlx5_alloc_map_uar(mdev, &sq->uar);
 +      err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
        if (err)
                return err;
  
                goto err_unmap_free_uar;
  
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
 -      sq->uar_map     = sq->uar.map;
 -      sq->uar_bf_map  = sq->uar.bf_map;
 +      if (sq->uar.bf_map) {
 +              set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
 +              sq->uar_map = sq->uar.bf_map;
 +      } else {
 +              sq->uar_map = sq->uar.map;
 +      }
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
  
@@@ -874,10 -860,12 +865,10 @@@ static int mlx5e_open_cq(struct mlx5e_c
        if (err)
                goto err_destroy_cq;
  
 -      err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
 -                                           moderation_usecs,
 -                                           moderation_frames);
 -      if (err)
 -              goto err_destroy_cq;
 -
 +      if (MLX5_CAP_GEN(mdev, cq_moderation))
 +              mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
 +                                             moderation_usecs,
 +                                             moderation_frames);
        return 0;
  
  err_destroy_cq:
@@@ -1066,15 -1054,6 +1057,15 @@@ static void mlx5e_build_rq_param(struc
        param->wq.linear = 1;
  }
  
 +static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
 +{
 +      void *rqc = param->rqc;
 +      void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 +
 +      MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 +      MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
 +}
 +
  static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
  {
@@@ -1211,7 -1190,6 +1202,6 @@@ static void mlx5e_fill_indir_rqt_rqns(s
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
  
                ix = priv->params.indirection_rqt[ix];
-               ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
@@@ -1329,7 -1307,22 +1319,22 @@@ static void mlx5e_build_tir_ctx_lro(voi
                              lro_timer_supported_periods[2]));
  }
  
- static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+ {
+       MLX5_SET(tirc, tirc, rx_hash_fn,
+                mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+       if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+               void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+                                            rx_hash_toeplitz_key);
+               size_t len = MLX5_FLD_SZ_BYTES(tirc,
+                                              rx_hash_toeplitz_key);
+               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+       }
+ }
+
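
Splitting the hash setup into mlx5e_build_tir_ctx_hash() lets the ethtool RSS path rebuild the TIR context when the key or hash function changes; rx_hash_symmetric additionally requests (as the field name suggests) a hash invariant under swapping source and destination, so both directions of a flow land on the same ring. For reference, a compact userspace sketch of the classic Toeplitz RSS hash the key feeds, assuming a 40-byte key (key bytes and tuple below are made up; the device computes this in hardware):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toeplitz RSS sketch: for each set bit of the input (MSB first), XOR in
     * the 32-bit window of the secret key starting at that bit position.
     */
    static uint32_t toeplitz(const uint8_t *key, const uint8_t *in, size_t len)
    {
        uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
                          ((uint32_t)key[2] << 8) | key[3];
        uint32_t hash = 0;

        for (size_t i = 0; i < len; i++) {
            for (int bit = 7; bit >= 0; bit--) {
                if (in[i] & (1u << bit))
                    hash ^= window;
                /* slide the window one bit, pulling in the next key bit */
                window = (window << 1) | ((key[i + 4] >> bit) & 1u);
            }
        }
        return hash;
    }

    int main(void)
    {
        uint8_t key[40];
        uint8_t tuple[12] = { 192, 168, 0, 1,  /* src IP (made-up flow) */
                              10, 0, 0, 2,     /* dst IP                */
                              0x1f, 0x90,      /* src port 8080         */
                              0xc3, 0x50 };    /* dst port 50000        */

        for (int i = 0; i < 40; i++)
            key[i] = (uint8_t)(i * 37 + 1);    /* made-up key bytes */

        printf("hash = 0x%08x\n", (unsigned)toeplitz(key, tuple, sizeof(tuple)));
        return 0;
    }
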
+ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
  {
        struct mlx5_core_dev *mdev = priv->mdev;
  
        void *tirc;
        int inlen;
        int err;
+       int tt;
  
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
  
        mlx5e_build_tir_ctx_lro(tirc, priv);
  
-       err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+               err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+               if (err)
+                       break;
+       }
  
        kvfree(in);
  
@@@ -1412,24 -1410,6 +1422,24 @@@ static int mlx5e_set_dev_port_mtu(struc
        return 0;
  }
  
 +static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +      int nch = priv->params.num_channels;
 +      int ntc = priv->params.num_tc;
 +      int tc;
 +
 +      netdev_reset_tc(netdev);
 +
 +      if (ntc == 1)
 +              return;
 +
 +      netdev_set_num_tc(netdev, ntc);
 +
 +      for (tc = 0; tc < ntc; tc++)
 +              netdev_set_tc_queue(netdev, tc, nch, tc * nch);
 +}
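
The loop above gives each traffic class a contiguous block of nch txqs: netdev_set_tc_queue(netdev, tc, nch, tc * nch) maps TC tc to queues [tc * nch, (tc + 1) * nch). A standalone sanity check of that layout, with example sizes only:

    #include <stdio.h>

    int main(void)
    {
        int nch = 4, ntc = 2;   /* example: 4 channels, 2 traffic classes */

        for (int tc = 0; tc < ntc; tc++)
            for (int ch = 0; ch < nch; ch++)
                /* mirrors netdev_set_tc_queue(netdev, tc, nch, tc * nch):
                 * TC 'tc' owns txqs [tc * nch, tc * nch + nch)
                 */
                printf("tc %d chan %d -> txq %d\n", tc, ch, tc * nch + ch);
        return 0;
    }
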
 +
  int mlx5e_open_locked(struct net_device *netdev)
  {
        struct mlx5e_priv *priv = netdev_priv(netdev);
  
        set_bit(MLX5E_STATE_OPENED, &priv->state);
  
 +      mlx5e_netdev_set_tcs(netdev);
 +
        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
                goto err_close_channels;
        }
  
 -      mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);
 +      mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
  
        schedule_delayed_work(&priv->update_stats_work, 0);
@@@ -1502,8 -1480,8 +1512,8 @@@ int mlx5e_close_locked(struct net_devic
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
  
        mlx5e_timestamp_cleanup(priv);
 -      mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
 +      mlx5e_redirect_rqts(priv);
        mlx5e_close_channels(priv);
  
        return 0;
@@@ -1585,7 -1563,8 +1595,7 @@@ static int mlx5e_open_drop_rq(struct ml
  
        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
 -      mlx5e_build_rx_cq_param(priv, &cq_param);
 -      mlx5e_build_rq_param(priv, &rq_param);
 +      mlx5e_build_drop_rq_param(&rq_param);
  
        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
@@@ -1633,7 -1612,7 +1643,7 @@@ static int mlx5e_create_tis(struct mlx5
  
        memset(in, 0, sizeof(in));
  
 -      MLX5_SET(tisc, tisc, prio,  tc);
 +      MLX5_SET(tisc, tisc, prio, tc << 1);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
  
        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
@@@ -1649,7 -1628,7 +1659,7 @@@ static int mlx5e_create_tises(struct ml
        int err;
        int tc;
  
 -      for (tc = 0; tc < priv->params.num_tc; tc++) {
 +      for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
                err = mlx5e_create_tis(priv, tc);
                if (err)
                        goto err_close_tises;
@@@ -1668,7 -1647,7 +1678,7 @@@ static void mlx5e_destroy_tises(struct 
  {
        int tc;
  
 -      for (tc = 0; tc < priv->params.num_tc; tc++)
 +      for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
                mlx5e_destroy_tis(priv, tc);
  }
  
@@@ -1703,17 -1682,7 +1713,7 @@@ static void mlx5e_build_tir_ctx(struct 
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
-               MLX5_SET(tirc, tirc, rx_hash_fn,
-                        mlx5e_rx_hash_fn(priv->params.rss_hfunc));
-               if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
-                       void *rss_key = MLX5_ADDR_OF(tirc, tirc,
-                                                    rx_hash_toeplitz_key);
-                       size_t len = MLX5_FLD_SZ_BYTES(tirc,
-                                                      rx_hash_toeplitz_key);
-                       MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-                       memcpy(rss_key, priv->params.toeplitz_hash_key, len);
-               }
+               mlx5e_build_tir_ctx_hash(tirc, priv);
                break;
        }
  
@@@ -1855,40 -1824,6 +1855,40 @@@ static void mlx5e_destroy_tirs(struct m
                mlx5e_destroy_tir(priv, i);
  }
  
 +static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +      bool was_opened;
 +      int err = 0;
 +
 +      if (tc && tc != MLX5E_MAX_NUM_TC)
 +              return -EINVAL;
 +
 +      mutex_lock(&priv->state_lock);
 +
 +      was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 +      if (was_opened)
 +              mlx5e_close_locked(priv->netdev);
 +
 +      priv->params.num_tc = tc ? tc : 1;
 +
 +      if (was_opened)
 +              err = mlx5e_open_locked(priv->netdev);
 +
 +      mutex_unlock(&priv->state_lock);
 +
 +      return err;
 +}
 +
 +static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
 +                            __be16 proto, struct tc_to_netdev *tc)
 +{
 +      if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
 +              return -EINVAL;
 +
 +      return mlx5e_setup_tc(dev, tc->tc);
 +}
 +
  static struct rtnl_link_stats64 *
  mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
@@@ -1950,8 -1885,10 +1950,10 @@@ static int mlx5e_set_features(struct ne
                        mlx5e_close_locked(priv->netdev);
  
                priv->params.lro_en = !!(features & NETIF_F_LRO);
-               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
-               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+               err = mlx5e_modify_tirs_lro(priv);
+               if (err)
+                       mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+                                      err);
  
                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
@@@ -2089,84 -2026,10 +2091,84 @@@ static int mlx5e_get_vf_stats(struct ne
                                            vf_stats);
  }
  
 +static void mlx5e_add_vxlan_port(struct net_device *netdev,
 +                               sa_family_t sa_family, __be16 port)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +
 +      if (!mlx5e_vxlan_allowed(priv->mdev))
 +              return;
 +
 +      mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
 +}
 +
 +static void mlx5e_del_vxlan_port(struct net_device *netdev,
 +                               sa_family_t sa_family, __be16 port)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +
 +      if (!mlx5e_vxlan_allowed(priv->mdev))
 +              return;
 +
 +      mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
 +}
 +
 +static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
 +                                                  struct sk_buff *skb,
 +                                                  netdev_features_t features)
 +{
 +      struct udphdr *udph;
 +      u16 proto;
 +      u16 port = 0;
 +
 +      switch (vlan_get_protocol(skb)) {
 +      case htons(ETH_P_IP):
 +              proto = ip_hdr(skb)->protocol;
 +              break;
 +      case htons(ETH_P_IPV6):
 +              proto = ipv6_hdr(skb)->nexthdr;
 +              break;
 +      default:
 +              goto out;
 +      }
 +
 +      if (proto == IPPROTO_UDP) {
 +              udph = udp_hdr(skb);
 +              port = be16_to_cpu(udph->dest);
 +      }
 +
 +      /* Check whether the UDP dport is offloaded by the HW */
 +      if (port && mlx5e_vxlan_lookup_port(priv, port))
 +              return features;
 +
 +out:
 +      /* Disable CSUM and GSO if the UDP dport is not offloaded by the HW */
 +      return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 +}
 +
 +static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 +                                            struct net_device *netdev,
 +                                            netdev_features_t features)
 +{
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +
 +      features = vlan_features_check(skb, features);
 +      features = vxlan_features_check(skb, features);
 +
 +      /* Check whether the tunneled packet is offloaded by the HW */
 +      if (skb->encapsulation &&
 +          (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
 +              return mlx5e_vxlan_features_check(priv, skb, features);
 +
 +      return features;
 +}
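
Taken together, the two helpers above implement a simple gate: an encapsulated packet keeps its checksum/GSO offloads only if its outer UDP destination port was previously programmed via ndo_add_vxlan_port; otherwise the stack falls back to software. A standalone sketch of the decision (the feature bits and port lookup are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_CSUM (1u << 0)    /* stand-in for NETIF_F_CSUM_MASK */
    #define F_GSO  (1u << 1)    /* stand-in for NETIF_F_GSO_MASK  */

    /* hypothetical stand-in for mlx5e_vxlan_lookup_port() */
    static bool port_offloaded(unsigned short dport)
    {
        return dport == 4789;   /* pretend only the IANA VXLAN port is programmed */
    }

    static unsigned int features_check(bool encap, unsigned short dport,
                                       unsigned int feat)
    {
        if (!encap)
            return feat;                    /* plain traffic: nothing to gate   */
        if (port_offloaded(dport))
            return feat;                    /* HW parses this tunnel: keep them */
        return feat & ~(F_CSUM | F_GSO);    /* otherwise fall back to software  */
    }

    int main(void)
    {
        printf("%x\n", features_check(true, 4789, F_CSUM | F_GSO)); /* 3 */
        printf("%x\n", features_check(true, 8472, F_CSUM | F_GSO)); /* 0 */
        return 0;
    }
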
 +
  static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
 +      .ndo_setup_tc            = mlx5e_ndo_setup_tc,
 +      .ndo_select_queue        = mlx5e_select_queue,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
@@@ -2181,8 -2044,6 +2183,8 @@@ static const struct net_device_ops mlx5
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
 +      .ndo_setup_tc            = mlx5e_ndo_setup_tc,
 +      .ndo_select_queue        = mlx5e_select_queue,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
 +      .ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
 +      .ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
 +      .ndo_features_check      = mlx5e_features_check,
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
@@@ -2220,8 -2078,6 +2222,8 @@@ static int mlx5e_check_required_hca_cap
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
 +      if (!MLX5_CAP_GEN(mdev, cq_moderation))
 +              mlx5_core_warn(mdev, "CQ moderation is not supported\n");
  
        return 0;
  }
@@@ -2235,30 -2091,20 +2237,38 @@@ u16 mlx5e_get_max_inline_cap(struct mlx
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
  }
  
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +static void mlx5e_ets_init(struct mlx5e_priv *priv)
 +{
 +      int i;
 +
 +      priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
 +      for (i = 0; i < priv->params.ets.ets_cap; i++) {
 +              priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
 +              priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
 +              priv->params.ets.prio_tc[i] = i;
 +      }
 +
 +      /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
 +      priv->params.ets.prio_tc[0] = 1;
 +      priv->params.ets.prio_tc[1] = 0;
 +}
 +#endif
 +
+ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+                                  int num_channels)
+ {
+       int i;
+
+       for (i = 0; i < len; i++)
+               indirection_rqt[i] = i % num_channels;
+ }
+
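
Factoring the fill loop out into mlx5e_build_default_indir_rqt() lets the ethtool RSS code reuse it; the default simply round-robins RQT entries across the open channels. With made-up sizes (the real table has MLX5E_INDIR_RQT_SIZE entries):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rqt[8];    /* example: 8 entries, 3 channels */
        int num_channels = 3;

        for (int i = 0; i < 8; i++)
            rqt[i] = i % num_channels;      /* round-robin spread */

        for (int i = 0; i < 8; i++)
            printf("%u ", rqt[i]);          /* 0 1 2 0 1 2 0 1 */
        printf("\n");
        return 0;
    }
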
  static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
  {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       int i;
  
        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.num_tc                = 1;
 -      priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
  
        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));
  
-       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
-               priv->params.indirection_rqt[i] = i % num_channels;
+       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, num_channels);
  
        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_channels;
 -      priv->default_vlan_prio            = priv->params.default_vlan_prio;
  
 -      spin_lock_init(&priv->async_events_spinlock);
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +      mlx5e_ets_init(priv);
 +#endif
 +
        mutex_init(&priv->state_lock);
  
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
@@@ -2321,14 -2166,10 +2331,14 @@@ static void mlx5e_build_netdev(struct n
  
        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
  
 -      if (MLX5_CAP_GEN(mdev, vport_group_manager))
 +      if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
                netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
 -      else
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +              netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
 +#endif
 +      } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_basic;
 +      }
  
        netdev->watchdog_timeo    = 15 * HZ;
  
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
  
 +      if (mlx5e_vxlan_allowed(mdev)) {
 +              netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
 +              netdev->hw_enc_features |= NETIF_F_IP_CSUM;
 +              netdev->hw_enc_features |= NETIF_F_RXCSUM;
 +              netdev->hw_enc_features |= NETIF_F_TSO;
 +              netdev->hw_enc_features |= NETIF_F_TSO6;
 +              netdev->hw_enc_features |= NETIF_F_RXHASH;
 +              netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
 +      }
 +
        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;
@@@ -2407,9 -2238,7 +2417,9 @@@ static void *mlx5e_create_netdev(struc
        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;
  
 -      netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
 +      netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
 +                                  nch * MLX5E_MAX_NUM_TC,
 +                                  nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
  
        priv = netdev_priv(netdev);
  
 -      err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
 +      err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
  
        mlx5e_init_eth_addr(priv);
  
 +      mlx5e_vxlan_init(priv);
 +
 +#ifdef CONFIG_MLX5_CORE_EN_DCB
 +      mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
 +#endif
 +
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_flow_tables;
        }
  
 +      if (mlx5e_vxlan_allowed(mdev))
 +              vxlan_get_rx_port(netdev);
 +
        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);
  
@@@ -2551,7 -2371,6 +2561,7 @@@ static void mlx5e_destroy_netdev(struc
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        unregister_netdev(netdev);
 +      mlx5e_vxlan_cleanup(priv);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
index 519a07f253f97e4786b4d658cf81871381f5fb55,59658b9d05d1fc57c9da02f39f96a245929f42e4..884ed19cded268a653ebe60adbbb28ddd55b27e8
@@@ -167,15 -167,14 +167,15 @@@ static inline bool is_first_ethertype_i
  static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
 -                                   struct sk_buff *skb)
 +                                   struct sk_buff *skb,
 +                                   bool   lro)
  {
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;
  
 -      if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
 +      if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 -      } else if (is_first_ethertype_ip(skb)) {
 +      } else if (likely(is_first_ethertype_ip(skb))) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                rq->stats.csum_sw++;
@@@ -212,7 -211,7 +212,7 @@@ static inline void mlx5e_build_rx_skb(s
        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
  
 -      mlx5e_handle_csum(netdev, cqe, rq, skb);
 +      mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
  
        skb->protocol = eth_type_trans(skb, netdev);
  
@@@ -231,10 -230,6 +231,6 @@@ int mlx5e_poll_rx_cq(struct mlx5e_cq *c
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done;
  
-       /* avoid accessing cq (dma coherent memory) if not needed */
-       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-               return 0;
        for (work_done = 0; work_done < budget; work_done++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
  
                mlx5e_build_rx_skb(cqe, rq, skb);
                rq->stats.packets++;
+               rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
                napi_gro_receive(cq->napi, skb);
  
  wq_ll_pop:
        /* ensure cq space is freed before enabling more cqes */
        wmb();
  
-       if (work_done == budget)
-               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
        return work_done;
  }
index c34f4f3e9537eeecdb95d1cd0a1fb6f580ef27a3,bb4eeeb007dec48eb022a6917f69d7fd5e1e5071..94a14f85f70d87a0ed49ef4f961db2cb745fe073
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
@@@ -109,10 -109,12 +109,10 @@@ u16 mlx5e_select_queue(struct net_devic
  {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
 -      int up = skb_vlan_tag_present(skb)        ?
 -               skb->vlan_tci >> VLAN_PRIO_SHIFT :
 -               priv->default_vlan_prio;
 -      int tc = netdev_get_prio_tc_map(dev, up);
 +      int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
 +               skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
  
 -      return priv->channeltc_to_txq_map[channel_ix][tc];
 +      return priv->channeltc_to_txq_map[channel_ix][up];
  }
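
With TCs configured, the queue is now chosen by the 802.1Q PCP bits alone: VLAN_PRIO_SHIFT (13 in if_vlan.h) discards the 12-bit VID and the DEI bit, leaving the top three bits of the TCI. A quick arithmetic check with a made-up tag:

    #include <stdio.h>

    #define VLAN_PRIO_SHIFT 13  /* PCP = top 3 bits of the 16-bit TCI */

    int main(void)
    {
        unsigned int vlan_tci = 0xA005; /* PCP=5, DEI=0, VID=5 */

        printf("up = %u\n", vlan_tci >> VLAN_PRIO_SHIFT);   /* prints: up = 5 */
        return 0;
    }
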
  
  static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
@@@ -177,6 -179,7 +177,7 @@@ static netdev_tx_t mlx5e_sq_xmit(struc
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
+       unsigned int num_bytes;
        bool bf = false;
        u16 headlen;
        u16 ds_cnt;
  
        memset(wqe, 0, sizeof(*wqe));
  
 -      if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
 -              eseg->cs_flags  = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
 -      else
 +      if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 +              eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
 +              if (skb->encapsulation) {
 +                      eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
 +                                        MLX5_ETH_WQE_L4_INNER_CSUM;
 +                      sq->stats.csum_offload_inner++;
 +              } else {
 +                      eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 +              }
 +      } else
                sq->stats.csum_offload_none++;
  
        if (sq->cc != sq->prev_cc) {
        }
  
        if (skb_is_gso(skb)) {
 -              u32 payload_len;
 -
                eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
                opcode       = MLX5_OPCODE_LSO;
 -              ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
 -              payload_len  = skb->len - ihs;
 +
 +              if (skb->encapsulation) {
 +                      ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
 +                      sq->stats.tso_inner_packets++;
 +                      sq->stats.tso_inner_bytes += skb->len - ihs;
 +              } else {
 +                      ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
 +                      sq->stats.tso_packets++;
 +                      sq->stats.tso_bytes += skb->len - ihs;
 +              }
 +
-               wi->num_bytes = skb->len +
-                               (skb_shinfo(skb)->gso_segs - 1) * ihs;
+               num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 -              sq->stats.tso_packets++;
 -              sq->stats.tso_bytes += payload_len;
        } else {
                bf = sq->bf_budget &&
                     !skb->xmit_more &&
                     !skb_shinfo(skb)->nr_frags;
                ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
-               wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+               num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }
  
+       wi->num_bytes = num_bytes;
        if (skb_vlan_tag_present(skb)) {
                mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
                                  &skb_len);
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;
  
 -              if (bf && sq->uar_bf_map)
 +              if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
                        bf_sz = wi->num_wqebbs << 3;
  
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
  
        sq->stats.packets++;
+       sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;
  
  dma_unmap_wqe_err:
@@@ -345,10 -338,6 +348,6 @@@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *
        u16 sqcc;
        int i;
  
-       /* avoid accessing cq (dma coherent memory) if not needed */
-       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-               return false;
        sq = container_of(cq, struct mlx5e_sq, cq);
  
        npkts = 0;
                                netif_tx_wake_queue(sq->txq);
                                sq->stats.wake++;
        }
-       if (i == MLX5E_TX_CQ_POLL_BUDGET) {
-               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-               return true;
-       }
  
-       return false;
+       return (i == MLX5E_TX_CQ_POLL_BUDGET);
  }
index 53487d3eb9f62aaf231d5efb6a3cd33f61db86c1,a94daa8c346ca11ca10f6eed09b0c15a28dde2c4..4afbc3e9e381bffff2bdabad30bee6653ef21eae
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/jiffies.h>
  #include <linux/bitops.h>
  #include <linux/list.h>
 +#include <net/devlink.h>
  #include <net/switchdev.h>
  #include <generated/utsrelease.h>
  
@@@ -305,47 -304,21 +305,47 @@@ mlxsw_sp_port_system_port_mapping_set(s
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
  }
  
 -static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
 -                                    bool *p_usable)
 +static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
 +                                       u8 local_port, u8 *p_module,
 +                                       u8 *p_width)
  {
 -      struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;
  
 -      mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 +      mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
 -      *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
 +      *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 +      *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        return 0;
  }
  
 +static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 +                                  u8 module, u8 width, u8 lane)
 +{
 +      char pmlp_pl[MLXSW_REG_PMLP_LEN];
 +      int i;
 +
 +      mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 +      mlxsw_reg_pmlp_width_set(pmlp_pl, width);
 +      for (i = 0; i < width; i++) {
 +              mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
 +              mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
 +      }
 +
 +      return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 +}
 +
 +static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 +{
 +      char pmlp_pl[MLXSW_REG_PMLP_LEN];
 +
 +      mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 +      mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
 +      return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 +}
 +
  static int mlxsw_sp_port_open(struct net_device *dev)
  {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@@ -1300,18 -1273,6 +1300,18 @@@ static u32 mlxsw_sp_to_ptys_speed(u32 s
        return ptys_proto;
  }
  
 +static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
 +{
 +      u32 ptys_proto = 0;
 +      int i;
 +
 +      for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
 +              if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
 +                      ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
 +      }
 +      return ptys_proto;
 +}
 +
  static int mlxsw_sp_port_set_settings(struct net_device *dev,
                                      struct ethtool_cmd *cmd)
  {
@@@ -1388,27 -1349,11 +1388,27 @@@ static const struct ethtool_ops mlxsw_s
        .set_settings           = mlxsw_sp_port_set_settings,
  };
  
 -static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 +static int
 +mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
 +{
 +      struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 +      u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
 +      char ptys_pl[MLXSW_REG_PTYS_LEN];
 +      u32 eth_proto_admin;
 +
 +      eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
 +      mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
 +                          eth_proto_admin);
 +      return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 +}
 +
 +static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 +                                bool split, u8 module, u8 width)
  {
 +      struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
        struct mlxsw_sp_port *mlxsw_sp_port;
 +      struct devlink_port *devlink_port;
        struct net_device *dev;
 -      bool usable;
        size_t bytes;
        int err;
  
        mlxsw_sp_port->dev = dev;
        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
        mlxsw_sp_port->local_port = local_port;
 +      mlxsw_sp_port->split = split;
        bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
        mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
        if (!mlxsw_sp_port->active_vlans) {
         */
        dev->hard_header_len += MLXSW_TXHDR_LEN;
  
 -      err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
 +      devlink_port = &mlxsw_sp_port->devlink_port;
 +      if (mlxsw_sp_port->split)
 +              devlink_port_split_set(devlink_port, module);
 +      err = devlink_port_register(devlink, devlink_port, local_port);
        if (err) {
 -              dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
 +              dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
                        mlxsw_sp_port->local_port);
 -              goto err_port_module_check;
 -      }
 -
 -      if (!usable) {
 -              dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
 -                      mlxsw_sp_port->local_port);
 -              goto port_not_usable;
 +              goto err_devlink_port_register;
        }
  
        err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
                goto err_port_swid_set;
        }
  
 +      err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
 +      if (err) {
 +              dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
 +                      mlxsw_sp_port->local_port);
 +              goto err_port_speed_by_width_set;
 +      }
 +
        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
                goto err_register_netdev;
        }
  
 +      devlink_port_type_eth_set(devlink_port, dev);
 +
        err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
        if (err)
                goto err_port_vlan_init;
@@@ -1532,11 -1470,10 +1532,11 @@@ err_register_netdev
  err_port_buffers_init:
  err_port_admin_status_set:
  err_port_mtu_set:
 +err_port_speed_by_width_set:
  err_port_swid_set:
  err_port_system_port_mapping_set:
 -port_not_usable:
 -err_port_module_check:
 +      devlink_port_unregister(&mlxsw_sp_port->devlink_port);
 +err_devlink_port_register:
  err_dev_addr_init:
        free_percpu(mlxsw_sp_port->pcpu_stats);
  err_alloc_stats:
@@@ -1548,28 -1485,6 +1548,28 @@@ err_port_active_vlans_alloc
        return err;
  }
  
 +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 +                              bool split, u8 module, u8 width, u8 lane)
 +{
 +      int err;
 +
 +      err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
 +                                     lane);
 +      if (err)
 +              return err;
 +
 +      err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
 +                                   width);
 +      if (err)
 +              goto err_port_create;
 +
 +      return 0;
 +
 +err_port_create:
 +      mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
 +      return err;
 +}
 +
  static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
  {
        struct net_device *dev = mlxsw_sp_port->dev;
  static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
  {
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
 +      struct devlink_port *devlink_port;
  
        if (!mlxsw_sp_port)
                return;
 +      mlxsw_sp->ports[local_port] = NULL;
 +      devlink_port = &mlxsw_sp_port->devlink_port;
 +      devlink_port_type_clear(devlink_port);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
 +      devlink_port_unregister(devlink_port);
        mlxsw_sp_port_vports_fini(mlxsw_sp_port);
        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 +      mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 +      mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
        free_percpu(mlxsw_sp_port->pcpu_stats);
        kfree(mlxsw_sp_port->untagged_vlans);
        kfree(mlxsw_sp_port->active_vlans);
@@@ -1621,7 -1529,6 +1621,7 @@@ static void mlxsw_sp_ports_remove(struc
  static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
  {
        size_t alloc_size;
 +      u8 module, width;
        int i;
        int err;
  
                return -ENOMEM;
  
        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
 -              err = mlxsw_sp_port_create(mlxsw_sp, i);
 +              err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
 +                                                  &width);
 +              if (err)
 +                      goto err_port_module_info_get;
 +              if (!width)
 +                      continue;
 +              mlxsw_sp->port_to_module[i] = module;
 +              err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
                if (err)
                        goto err_port_create;
        }
        return 0;
  
  err_port_create:
 +err_port_module_info_get:
        for (i--; i >= 1; i--)
                mlxsw_sp_port_remove(mlxsw_sp, i);
        kfree(mlxsw_sp->ports);
        return err;
  }
  
 +static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
 +{
 +      u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
 +
 +      return local_port - offset;
 +}
 +
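
Because local ports are numbered from 1, the helper above finds the first port of a lane cluster with (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX. Assuming the cluster size is 4 (the maximum module width), the mapping works out as follows:

    #include <stdio.h>

    #define PORTS_PER_CLUSTER 4 /* assumed MLXSW_SP_PORTS_PER_CLUSTER_MAX */

    int main(void)
    {
        for (unsigned int local_port = 1; local_port <= 8; local_port++) {
            unsigned int offset = (local_port - 1) % PORTS_PER_CLUSTER;

            /* ports 1..4 -> base 1, ports 5..8 -> base 5 */
            printf("port %u -> base %u\n", local_port, local_port - offset);
        }
        return 0;
    }
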
 +static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
 +{
 +      struct mlxsw_sp *mlxsw_sp = priv;
 +      struct mlxsw_sp_port *mlxsw_sp_port;
 +      u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
 +      u8 module, cur_width, base_port;
 +      int i;
 +      int err;
 +
 +      mlxsw_sp_port = mlxsw_sp->ports[local_port];
 +      if (!mlxsw_sp_port) {
 +              dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
 +                      local_port);
 +              return -EINVAL;
 +      }
 +
 +      if (count != 2 && count != 4) {
 +              netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
 +              return -EINVAL;
 +      }
 +
 +      err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
 +                                          &cur_width);
 +      if (err) {
 +              netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
 +              return err;
 +      }
 +
 +      if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
 +              netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Make sure we have enough slave (even) ports for the split. */
 +      if (count == 2) {
 +              base_port = local_port;
 +              if (mlxsw_sp->ports[base_port + 1]) {
 +                      netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
 +                      return -EINVAL;
 +              }
 +      } else {
 +              base_port = mlxsw_sp_cluster_base_port_get(local_port);
 +              if (mlxsw_sp->ports[base_port + 1] ||
 +                  mlxsw_sp->ports[base_port + 3]) {
 +                      netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
 +                      return -EINVAL;
 +              }
 +      }
 +
 +      for (i = 0; i < count; i++)
 +              mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 +
 +      for (i = 0; i < count; i++) {
 +              err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
 +                                         module, width, i * width);
 +              if (err) {
 +                      dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
 +                      goto err_port_create;
 +              }
 +      }
 +
 +      return 0;
 +
 +err_port_create:
 +      for (i--; i >= 0; i--)
 +              mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 +      for (i = 0; i < count / 2; i++) {
 +              module = mlxsw_sp->port_to_module[base_port + i * 2];
 +              mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
 +                                   module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
 +      }
 +      return err;
 +}
 +
 +static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
 +{
 +      struct mlxsw_sp *mlxsw_sp = priv;
 +      struct mlxsw_sp_port *mlxsw_sp_port;
 +      u8 module, cur_width, base_port;
 +      unsigned int count;
 +      int i;
 +      int err;
 +
 +      mlxsw_sp_port = mlxsw_sp->ports[local_port];
 +      if (!mlxsw_sp_port) {
 +              dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
 +                      local_port);
 +              return -EINVAL;
 +      }
 +
 +      if (!mlxsw_sp_port->split) {
 +              netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
 +              return -EINVAL;
 +      }
 +
 +      err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
 +                                          &cur_width);
 +      if (err) {
 +              netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
 +              return err;
 +      }
 +      count = cur_width == 1 ? 4 : 2;
 +
 +      base_port = mlxsw_sp_cluster_base_port_get(local_port);
 +
 +      /* Determine which ports to remove. */
 +      if (count == 2 && local_port >= base_port + 2)
 +              base_port = base_port + 2;
 +
 +      for (i = 0; i < count; i++)
 +              mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 +
 +      for (i = 0; i < count / 2; i++) {
 +              module = mlxsw_sp->port_to_module[base_port + i * 2];
 +              err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
 +                                         module, MLXSW_PORT_MODULE_MAX_WIDTH,
 +                                         0);
 +              if (err)
 +                      dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
 +      }
 +
 +      return 0;
 +}
 +
  static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
                                     char *pude_pl, void *priv)
  {
@@@ -2206,8 -1974,6 +2206,8 @@@ static struct mlxsw_driver mlxsw_sp_dri
        .priv_size              = sizeof(struct mlxsw_sp),
        .init                   = mlxsw_sp_init,
        .fini                   = mlxsw_sp_fini,
 +      .port_split             = mlxsw_sp_port_split,
 +      .port_unsplit           = mlxsw_sp_port_unsplit,
        .txhdr_construct        = mlxsw_sp_txhdr_construct,
        .txhdr_len              = MLXSW_TXHDR_LEN,
        .profile                = &mlxsw_sp_config_profile,
@@@ -2592,9 -2358,7 +2592,7 @@@ static int mlxsw_sp_port_lag_leave(stru
        if (mlxsw_sp_port->bridged) {
                mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
-               if (lag->ref_count == 1)
-                       mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+               mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
        }
  
        if (lag->ref_count == 1) {
index 88656ceb6e2946662b63fd0a6120ce0777d88a18,86449c357168ebb4cd6c6fa25d1503583b8cd82d..8f2c4fb4c7246d6cae26e09c33a31c0573c85b00
@@@ -2,7 -2,7 +2,7 @@@
   *
   * Copyright (C) 2014-2015 Renesas Electronics Corporation
   * Copyright (C) 2015 Renesas Solutions Corp.
 - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
   *
   * Based on the SuperH Ethernet driver
   *
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)
  
 +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
 +               u32 set)
 +{
 +      ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
 +}
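
ravb_modify() collapses the read-OR/AND-write pairs converted throughout this file into one read-modify-write: drop the bits in 'clear', then OR in 'set'. Its semantics, modeled in plain C with the register replaced by a variable (example values only):

    #include <stdio.h>

    static unsigned int reg;    /* stands in for a RAVB register */

    /* userspace model of ravb_modify(): drop 'clear' bits, OR in 'set' */
    static void modify(unsigned int clear, unsigned int set)
    {
        reg = (reg & ~clear) | set;
    }

    int main(void)
    {
        reg = 0x0f;
        modify(0x01, 0x00);     /* clear a bit     -> 0x0e */
        modify(0x10, 0x10);     /* set a bit       -> 0x1e */
        modify(0x06, 0x02);     /* replace a field -> 0x1a */
        printf("0x%02x\n", reg);
        return 0;
    }
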
 +
  int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
  {
        int i;
@@@ -65,7 -59,8 +65,7 @@@ static int ravb_config(struct net_devic
        int error;
  
        /* Set config mode */
 -      ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
 -                 CCC);
 +      ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
        /* Check if the operating mode is changed to the config mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
  static void ravb_set_duplex(struct net_device *ndev)
  {
        struct ravb_private *priv = netdev_priv(ndev);
 -      u32 ecmr = ravb_read(ndev, ECMR);
  
 -      if (priv->duplex)       /* Full */
 -              ecmr |=  ECMR_DM;
 -      else                    /* Half */
 -              ecmr &= ~ECMR_DM;
 -      ravb_write(ndev, ecmr, ECMR);
 +      ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
  }
  
  static void ravb_set_rate(struct net_device *ndev)
@@@ -92,6 -92,8 +92,6 @@@
        case 1000:              /* 1000BASE */
                ravb_write(ndev, GECMR_SPEED_1000, GECMR);
                break;
 -      default:
 -              break;
        }
  }
  
@@@ -129,8 -131,13 +129,8 @@@ static void ravb_mdio_ctrl(struct mdiob
  {
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);
 -      u32 pir = ravb_read(priv->ndev, PIR);
  
 -      if (set)
 -              pir |=  mask;
 -      else
 -              pir &= ~mask;
 -      ravb_write(priv->ndev, pir, PIR);
 +      ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
  }
  
  /* MDC pin control */
@@@ -386,9 -393,9 +386,9 @@@ static int ravb_dmac_init(struct net_de
        ravb_ring_format(ndev, RAVB_NC);
  
  #if defined(__LITTLE_ENDIAN)
 -      ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
 +      ravb_modify(ndev, CCC, CCC_BOC, 0);
  #else
 -      ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
 +      ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
  #endif
  
        /* Set AVB RX */
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
  
        /* Setting the control will start the AVB-DMAC process. */
 -      ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
 -                 CCC);
 +      ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
  
        return 0;
  }
@@@ -485,7 -493,7 +485,7 @@@ static void ravb_get_tx_tstamp(struct n
                                break;
                        }
                }
 -              ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
 +              ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
        }
  }
  
@@@ -605,13 -613,13 +605,13 @@@ static bool ravb_rx(struct net_device *
  static void ravb_rcv_snd_disable(struct net_device *ndev)
  {
        /* Disable TX and RX */
 -      ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
 +      ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
  }
  
  static void ravb_rcv_snd_enable(struct net_device *ndev)
  {
        /* Enable TX and RX */
 -      ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
 +      ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
  }
  
  /* function for waiting dma process finished */
@@@ -804,8 -812,8 +804,8 @@@ static int ravb_poll(struct napi_struc
  
        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
 -      ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
 -      ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
 +      ravb_modify(ndev, RIC0, mask, mask);
 +      ravb_modify(ndev, TIC,  mask, mask);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
  
@@@ -844,7 -852,8 +844,7 @@@ static void ravb_adjust_link(struct net
                        ravb_set_rate(ndev);
                }
                if (!priv->link) {
 -                      ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
 -                                 ECMR);
 +                      ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
                        if (priv->no_avb_link)
@@@ -1388,7 -1397,7 +1388,7 @@@ static netdev_tx_t ravb_start_xmit(stru
        desc--;
        desc->die_dt = DT_FSTART;
  
 -      ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 +      ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
  
        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
@@@ -1463,10 -1472,15 +1463,10 @@@ static void ravb_set_rx_mode(struct net
  {
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
 -      u32 ecmr;
  
        spin_lock_irqsave(&priv->lock, flags);
 -      ecmr = ravb_read(ndev, ECMR);
 -      if (ndev->flags & IFF_PROMISC)
 -              ecmr |=  ECMR_PRM;
 -      else
 -              ecmr &= ~ECMR_PRM;
 -      ravb_write(ndev, ecmr, ECMR);
 +      ravb_modify(ndev, ECMR, ECMR_PRM,
 +                  ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
  }
@@@ -1708,7 -1722,6 +1708,6 @@@ static int ravb_set_gti(struct net_devi
  static int ravb_probe(struct platform_device *pdev)
  {
        struct device_node *np = pdev->dev.of_node;
-       const struct of_device_id *match;
        struct ravb_private *priv;
        enum ravb_chip_id chip_id;
        struct net_device *ndev;
        ndev->base_addr = res->start;
        ndev->dma = -1;
  
-       match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
-       chip_id = (enum ravb_chip_id)match->data;
+       chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
  
        if (chip_id == RCAR_GEN3)
                irq = platform_get_irq_byname(pdev, "ch22");
  
        /* Set AVB config mode */
        if (chip_id == RCAR_GEN2) {
 -              ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
 -                         CCC_OPC_CONFIG, CCC);
 +              ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
                /* Set CSEL value */
 -              ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) |
 -                         CCC_CSEL_HPB, CCC);
 +              ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
        } else {
 -              ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
 -                         CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
 +              ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
 +                          CCC_GAC | CCC_CSEL_HPB);
        }
  
        /* Set GTI value */
                goto out_release;
  
        /* Request GTI loading */
 -      ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
 +      ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
  
        /* Allocate descriptor base address table */
        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
index a2767336b7c54512cb68656ad406ec0bf43574fc,7384499928761612775af0f8b1eaf701fcbb4d18..9c6448915b653226ad59c1ea3ac27e2937c2d417
@@@ -3,7 -3,7 +3,7 @@@
   *  Copyright (C) 2014  Renesas Electronics Corporation
   *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
   *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 - *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
 + *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
   *  Copyright (C) 2014 Codethink Limited
   *
   *  This program is free software; you can redistribute it and/or modify it
@@@ -428,13 -428,6 +428,13 @@@ static u32 sh_eth_read(struct net_devic
        return ioread32(mdp->addr + offset);
  }
  
 +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 +                        u32 set)
 +{
 +      sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
 +                   enum_index);
 +}
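
sh_eth gains the same helper as ravb above. One non-obvious use follows in sh_eth_dev_init(): sh_eth_modify(ndev, EESR, 0, 0) computes (val & ~0) | 0, i.e. writes back exactly the value just read, which for a write-1-to-clear status register acknowledges every pending bit (this appears to be the intent for EESR). Modeled in plain C:

    #include <stdio.h>

    int main(void)
    {
        unsigned int eesr = 0x2a;               /* pending W1C status bits    */
        unsigned int val = (eesr & ~0u) | 0u;   /* clear = 0, set = 0 -> eesr */

        eesr &= ~val;                           /* each 1 written back clears */
        printf("0x%02x\n", eesr);               /* 0x00: all acknowledged     */
        return 0;
    }
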
 +
  static bool sh_eth_is_gether(struct sh_eth_private *mdp)
  {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@@ -474,7 -467,10 +474,7 @@@ static void sh_eth_set_duplex(struct ne
  {
        struct sh_eth_private *mdp = netdev_priv(ndev);
  
 -      if (mdp->duplex) /* Full */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
 -      else            /* Half */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 +      sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
  }
  
  static void sh_eth_chip_reset(struct net_device *ndev)
@@@ -500,6 -496,8 +500,6 @@@ static void sh_eth_set_rate_gether(stru
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
 -      default:
 -              break;
        }
  }
  
@@@ -585,10 -583,12 +585,10 @@@ static void sh_eth_set_rate_r8a777x(str
  
        switch (mdp->speed) {
        case 10: /* 10BASE */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
 +              sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
                break;
        case 100:/* 100BASE */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
 -              break;
 -      default:
 +              sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
                break;
        }
  }
@@@ -649,10 -649,12 +649,10 @@@ static void sh_eth_set_rate_sh7724(stru
  
        switch (mdp->speed) {
        case 10: /* 10BASE */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
 +              sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
                break;
        case 100:/* 100BASE */
 -              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
 -              break;
 -      default:
 +              sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
                break;
        }
  }
@@@ -692,6 -694,8 +692,6 @@@ static void sh_eth_set_rate_sh7757(stru
        case 100:/* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
 -      default:
 -              break;
        }
  }
  
@@@ -759,6 -763,8 +759,6 @@@ static void sh_eth_set_rate_giga(struc
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
 -      default:
 -              break;
        }
  }
  
@@@ -918,7 -924,8 +918,7 @@@ static int sh_eth_reset(struct net_devi
  
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
 -              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
 -                           EDMR);
 +              sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
  
                ret = sh_eth_check_reset(ndev);
                if (ret)
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
 -              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
 -                           EDMR);
 +              sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
                mdelay(3);
 -              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
 -                           EDMR);
 +              sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
        }
  
        return ret;
@@@ -1276,7 -1285,7 +1276,7 @@@ static int sh_eth_dev_init(struct net_d
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);
  
 -      sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
 +      sh_eth_modify(ndev, EESR, 0, 0);
        if (start) {
                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
@@@ -1523,13 -1532,15 +1523,13 @@@ static int sh_eth_rx(struct net_device 
  static void sh_eth_rcv_snd_disable(struct net_device *ndev)
  {
        /* disable tx and rx */
 -      sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
 -              ~(ECMR_RE | ECMR_TE), ECMR);
 +      sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
  }
  
  static void sh_eth_rcv_snd_enable(struct net_device *ndev)
  {
        /* enable tx and rx */
 -      sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
 -              (ECMR_RE | ECMR_TE), ECMR);
 +      sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
  }
  
  /* error control function */
@@@ -1558,11 -1569,13 +1558,11 @@@ static void sh_eth_error(struct net_dev
                                sh_eth_rcv_snd_disable(ndev);
                        } else {
                                /* Link Up */
 -                              sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
 -                                                 ~DMAC_M_ECI, EESIPR);
 +                              sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
                                /* clear int */
 -                              sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
 -                                           ECSR);
 -                              sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
 -                                                 DMAC_M_ECI, EESIPR);
 +                              sh_eth_modify(ndev, ECSR, 0, 0);
 +                              sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
 +                                            DMAC_M_ECI);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
@@@ -1752,7 -1765,9 +1752,7 @@@ static void sh_eth_adjust_link(struct n
                                mdp->cd->set_rate(ndev);
                }
                if (!mdp->link) {
 -                      sh_eth_write(ndev,
 -                                   sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
 -                                   ECMR);
 +                      sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
                        if (mdp->cd->no_psr || mdp->no_ether_link)
@@@ -2907,6 -2922,8 +2907,6 @@@ static const u16 *sh_eth_get_register_o
        case SH_ETH_REG_FAST_SH3_SH2:
                reg_offset = sh_eth_offset_fast_sh3_sh2;
                break;
 -      default:
 -              break;
        }
  
        return reg_offset;
@@@ -3044,15 -3061,11 +3044,11 @@@ static int sh_eth_drv_probe(struct plat
        mdp->ether_link_active_low = pd->ether_link_active_low;
  
        /* set cpu data */
-       if (id) {
+       if (id)
                mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
-       } else  {
-               const struct of_device_id *match;
+       else
+               mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
  
-               match = of_match_device(of_match_ptr(sh_eth_match_table),
-                                       &pdev->dev);
-               mdp->cd = (struct sh_eth_cpu_data *)match->data;
-       }
        mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
        if (!mdp->reg_offset) {
                dev_err(&pdev->dev, "Unknown register type (%d)\n",
index 9cf181f839fd3dcda71aa32f6a4476846a2e4287,4514ba73d96116317ca5ff8b797b037ae7434ffd..dcbd2a1601e89a620bb2dc1c77887e374d3fe2ba
@@@ -95,42 -95,6 +95,42 @@@ static int dwmac1000_validate_ucast_ent
        return x;
  }
  
 +/**
 + * stmmac_axi_setup - parse DT parameters for programming the AXI register
 + * @pdev: platform device
 + * Description:
 + * If the optional "snps,axi-config" phandle is present in the device tree,
 + * the AXI internal register can be tuned through its properties.
 + */
 +static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 +{
 +      struct device_node *np;
 +      struct stmmac_axi *axi;
 +
 +      np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
 +      if (!np)
 +              return NULL;
 +
 +      axi = kzalloc(sizeof(*axi), GFP_KERNEL);
 +      if (!axi)
 +              return ERR_PTR(-ENOMEM);
 +
 +      axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
 +      axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
 +      axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
 +      axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
 +      axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
 +      axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
 +      axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
 +
 +      of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
 +      of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
 +      of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
 +
 +      return axi;
 +}
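
The property names above come straight from the parser; a hypothetical device-tree fragment exercising it could look like the sketch below (values invented for illustration; snps,blen expects AXI_BLEN cells, which is seven in mainline):

    /* Hypothetical device-tree fragment for stmmac_axi_setup():
     *
     *	axi_cfg: stmmac-axi-config {
     *		snps,lpi_en;
     *		snps,axi_fb;
     *		snps,wr_osr_lmt = <1>;
     *		snps,rd_osr_lmt = <1>;
     *		snps,blen = <256 128 64 32 0 0 0>;
     *	};
     *
     *	&gmac {
     *		snps,axi-config = <&axi_cfg>;
     *	};
     */
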
 +
  /**
   * stmmac_probe_config_dt - parse device-tree driver parameters
   * @pdev: platform_device structure
@@@ -146,6 -110,7 +146,7 @@@ stmmac_probe_config_dt(struct platform_
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
+       struct device_node *child_node = NULL;
  
        plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
        if (!plat)
                plat->phy_node = of_node_get(np);
        }
  
+       for_each_child_of_node(np, child_node)
+               if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
+                       plat->mdio_node = child_node;
+                       break;
+               }
        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
  
-       if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name)
+       if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
                plat->mdio_bus_data = NULL;
        else
                plat->mdio_bus_data =
                }
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
 +              dma_cfg->aal = of_property_read_bool(np, "snps,aal");
                dma_cfg->fixed_burst =
                        of_property_read_bool(np, "snps,fixed-burst");
                dma_cfg->mixed_burst =
                        of_property_read_bool(np, "snps,mixed-burst");
 -              of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
 -              if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
 -                      dma_cfg->burst_len = 0;
        }
        plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
        if (plat->force_thresh_dma_mode) {
                pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
        }
  
 +      plat->axi = stmmac_axi_setup(pdev);
 +
        return plat;
  }
  #else
diff --combined drivers/net/phy/micrel.c
index 48219c83fb006fb27fe47f56221c06916e19c835,dc85f7095e51038c0ebbb1182ba0d169ec0b2dd7..4516c8a4fd82b3d73a7393f2fd3b09d25d4263bc
@@@ -297,6 -297,17 +297,17 @@@ static int kszphy_config_init(struct ph
        if (priv->led_mode >= 0)
                kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
  
+       if (phy_interrupt_is_valid(phydev)) {
+               int ctl = phy_read(phydev, MII_BMCR);
+               if (ctl < 0)
+                       return ctl;
+               ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
+               if (ret < 0)
+                       return ret;
+       }
        return 0;
  }
  
@@@ -612,19 -623,18 +623,19 @@@ static u64 kszphy_get_stat(struct phy_d
  {
        struct kszphy_hw_stat stat = kszphy_hw_stats[i];
        struct kszphy_priv *priv = phydev->priv;
 -      u64 val;
 +      int val;
 +      u64 ret;
  
        val = phy_read(phydev, stat.reg);
        if (val < 0) {
 -              val = UINT64_MAX;
 +              ret = UINT64_MAX;
        } else {
                val = val & ((1 << stat.bits) - 1);
                priv->stats[i] += val;
 -              val = priv->stats[i];
 +              ret = priv->stats[i];
        }
  
 -      return val;
 +      return ret;
  }
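The kszphy_get_stat() change above fixes a sign bug: the phy_read() result was stored in a u64, so a negative errno could never satisfy the val < 0 test. A minimal demonstration of the failure mode (assuming phy_read() returns a negative errno such as -EIO on error):

	u64 uval = (u64)-EIO;	/* wraps to 0xff...fb, so "uval < 0" is never true */
	int ival = -EIO;	/* "ival < 0" is true, so the error path is taken */

Keeping the raw register read in an int and widening only the accumulated counter restores error detection while still returning UINT64_MAX as the ethtool sentinel.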
  
  static void kszphy_get_stats(struct phy_device *phydev,
                data[i] = kszphy_get_stat(phydev, i);
  }
  
+ static int kszphy_resume(struct phy_device *phydev)
+ {
+       int value;
+ 
+       mutex_lock(&phydev->lock);
+       value = phy_read(phydev, MII_BMCR);
+       phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+       kszphy_config_intr(phydev);
+       mutex_unlock(&phydev->lock);
+ 
+       return 0;
+ }
+ 
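For comparison, the generic resume handler being replaced only clears the power-down bit; a sketch of genphy_resume() as it looked around this time (reproduced from memory, treat as illustrative rather than authoritative):

int genphy_resume(struct phy_device *phydev)
{
	int value;

	mutex_lock(&phydev->lock);
	value = phy_read(phydev, MII_BMCR);
	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
	mutex_unlock(&phydev->lock);

	return 0;
}

kszphy_resume() differs only in re-running kszphy_config_intr(), presumably because the interrupt configuration of these PHYs does not survive power-down.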
  static int kszphy_probe(struct phy_device *phydev)
  {
        const struct kszphy_type *type = phydev->drv->driver_data;
@@@ -845,7 -870,7 +871,7 @@@ static struct phy_driver ksphy_driver[
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = kszphy_resume,
  }, {
        .phy_id         = PHY_ID_KSZ8061,
        .name           = "Micrel KSZ8061",
index 04f4eb34fa8084aecc5a4db4cac08e73acf76b1c,d61da9ece3ba021a7aa68253efe8dd82198cb575..931836e09a6b62f3b34de3f6490e655bf05ce1eb
@@@ -443,9 -443,14 +443,14 @@@ static ssize_t ppp_read(struct file *fi
                         * network traffic (demand mode).
                         */
                        struct ppp *ppp = PF_TO_PPP(pf);
+                       ppp_recv_lock(ppp);
                        if (ppp->n_channels == 0 &&
-                           (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+                           (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
+                               ppp_recv_unlock(ppp);
                                break;
+                       }
+                       ppp_recv_unlock(ppp);
                }
                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
@@@ -532,9 -537,12 +537,12 @@@ static unsigned int ppp_poll(struct fil
        else if (pf->kind == INTERFACE) {
                /* see comment in ppp_read */
                struct ppp *ppp = PF_TO_PPP(pf);
+               ppp_recv_lock(ppp);
                if (ppp->n_channels == 0 &&
                    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
                        mask |= POLLIN | POLLRDNORM;
+               ppp_recv_unlock(ppp);
        }
  
        return mask;
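The two ppp hunks above close a race: ppp->n_channels and ppp->flags are now sampled under the receive lock, so a channel cannot be attached or detached between the test and the decision. The locking helpers assumed here are thin wrappers around the per-ppp receive spinlock, roughly (from memory, illustrative):

#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)

Note that both the break path and the fall-through path in ppp_read() drop the lock before leaving, so nothing is held while the reader sleeps.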
@@@ -2429,15 -2437,13 +2437,15 @@@ ppp_set_compress(struct ppp *ppp, unsig
        unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
  
        err = -EFAULT;
 -      if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
 -          (data.length <= CCP_MAX_OPTION_LENGTH &&
 -           copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
 +      if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
                goto out;
 +      if (data.length > CCP_MAX_OPTION_LENGTH)
 +              goto out;
 +      if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
 +              goto out;
 +
        err = -EINVAL;
 -      if (data.length > CCP_MAX_OPTION_LENGTH ||
 -          ccp_option[1] < 2 || ccp_option[1] > data.length)
 +      if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
                goto out;
  
        cp = try_then_request_module(
@@@ -2810,6 -2816,7 +2818,7 @@@ static struct ppp *ppp_create_interface
  
  out2:
        mutex_unlock(&pn->all_ppp_mutex);
+       rtnl_unlock();
        free_netdev(dev);
  out1:
        *retp = ret;
diff --combined drivers/net/vrf.c
index 9ce088bb28ab9b6206737984dbe21e7093b28709,bdcf617a9d52b86eb41c13dad6df1c3c42d3d319..9a9fabb900c19e5f7dc483a63f1b71fcd1a96e5a
@@@ -32,6 -32,7 +32,6 @@@
  #include <net/ip_fib.h>
  #include <net/ip6_fib.h>
  #include <net/ip6_route.h>
 -#include <net/rtnetlink.h>
  #include <net/route.h>
  #include <net/addrconf.h>
  #include <net/l3mdev.h>
@@@ -103,20 -104,23 +103,23 @@@ static struct dst_ops vrf_dst_ops = 
  #if IS_ENABLED(CONFIG_IPV6)
  static bool check_ipv6_frame(const struct sk_buff *skb)
  {
-       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
-       size_t hlen = sizeof(*ipv6h);
+       const struct ipv6hdr *ipv6h;
+       struct ipv6hdr _ipv6h;
        bool rc = true;
  
-       if (skb->len < hlen)
+       ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+       if (!ipv6h)
                goto out;
  
        if (ipv6h->nexthdr == NEXTHDR_ICMP) {
                const struct icmp6hdr *icmph;
+               struct icmp6hdr _icmph;
  
-               if (skb->len < hlen + sizeof(*icmph))
+               icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+                                          sizeof(_icmph), &_icmph);
+               if (!icmph)
                        goto out;
  
-               icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
                switch (icmph->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
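The vrf hunk above replaces direct skb->data dereferences with skb_header_pointer(), which copies the requested range into a caller-supplied buffer when it is not linearly available and returns NULL when the packet is too short. An illustrative (hypothetical) helper showing the pattern in isolation:

static bool frame_has_ipv6_header(const struct sk_buff *skb)
{
	struct ipv6hdr _ipv6h;	/* on-stack copy, used only if data is paged */

	return skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h) != NULL;
}

This is safer than comparing skb->len against header sizes because it also copes with non-linear skbs.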
@@@ -876,24 -880,6 +879,24 @@@ static int vrf_fillinfo(struct sk_buff 
        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
  }
  
 +static size_t vrf_get_slave_size(const struct net_device *bond_dev,
 +                               const struct net_device *slave_dev)
 +{
 +      return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
 +}
 +
 +static int vrf_fill_slave_info(struct sk_buff *skb,
 +                             const struct net_device *vrf_dev,
 +                             const struct net_device *slave_dev)
 +{
 +      struct net_vrf *vrf = netdev_priv(vrf_dev);
 +
 +      if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
 +              return -EMSGSIZE;
 +
 +      return 0;
 +}
 +
  static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
  };
@@@ -907,9 -893,6 +910,9 @@@ static struct rtnl_link_ops vrf_link_op
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,
  
 +      .get_slave_size  = vrf_get_slave_size,
 +      .fill_slave_info = vrf_fill_slave_info,
 +
        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
diff --combined drivers/net/vxlan.c
index 8ca243d93b781acb1bdd612dee90c8722b4b21f1,1c32bd10479730a73f2832fa7f15ef3d845941db..fc998a3bd2347a756e5c61088eabfe783ef99951
@@@ -42,7 -42,7 +42,7 @@@
  #include <net/netns/generic.h>
  #include <net/vxlan.h>
  #include <net/protocol.h>
 -#include <net/udp_tunnel.h>
 +
  #if IS_ENABLED(CONFIG_IPV6)
  #include <net/ipv6.h>
  #include <net/addrconf.h>
@@@ -197,9 -197,9 +197,9 @@@ static int vxlan_nla_put_addr(struct sk
  #endif
  
  /* Virtual Network hash table head */
 -static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
 +static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
  {
 -      return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
 +      return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
  }
  
  /* Socket hash table head */
@@@ -242,16 -242,12 +242,16 @@@ static struct vxlan_sock *vxlan_find_so
        return NULL;
  }
  
 -static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 +static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
  {
        struct vxlan_dev *vxlan;
  
 -      hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
 -              if (vxlan->default_dst.remote_vni == id)
 +      /* For flow based devices, map all packets to VNI 0 */
 +      if (vs->flags & VXLAN_F_COLLECT_METADATA)
 +              vni = 0;
 +
 +      hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
 +              if (vxlan->default_dst.remote_vni == vni)
                        return vxlan;
        }
  
  }
  
  /* Look up VNI in a per net namespace table */
 -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
 +static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
                                        sa_family_t family, __be16 port,
                                        u32 flags)
  {
        if (!vs)
                return NULL;
  
 -      return vxlan_vs_find_vni(vs, id);
 +      return vxlan_vs_find_vni(vs, vni);
  }
  
  /* Fill in neighbour message in skbuff. */
@@@ -319,7 -315,7 +319,7 @@@ static int vxlan_fdb_info(struct sk_buf
            nla_put_be16(skb, NDA_PORT, rdst->remote_port))
                goto nla_put_failure;
        if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
 -          nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
 +          nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
                goto nla_put_failure;
        if (rdst->remote_ifindex &&
            nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
@@@ -387,7 -383,7 +387,7 @@@ static void vxlan_ip_miss(struct net_de
        };
        struct vxlan_rdst remote = {
                .remote_ip = *ipa, /* goes to NDA_DST */
 -              .remote_vni = VXLAN_N_VID,
 +              .remote_vni = cpu_to_be32(VXLAN_N_VID),
        };
  
        vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
@@@ -456,7 -452,7 +456,7 @@@ static struct vxlan_fdb *vxlan_find_mac
  /* caller should hold vxlan->hash_lock */
  static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
                                              union vxlan_addr *ip, __be16 port,
 -                                            __u32 vni, __u32 ifindex)
 +                                            __be32 vni, __u32 ifindex)
  {
        struct vxlan_rdst *rd;
  
  
  /* Replace destination of unicast mac */
  static int vxlan_fdb_replace(struct vxlan_fdb *f,
 -                           union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
 +                           union vxlan_addr *ip, __be16 port, __be32 vni,
 +                           __u32 ifindex)
  {
        struct vxlan_rdst *rd;
  
        rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
        if (!rd)
                return 0;
 +
 +      dst_cache_reset(&rd->dst_cache);
        rd->remote_ip = *ip;
        rd->remote_port = port;
        rd->remote_vni = vni;
  
  /* Add/update destinations for multicast */
  static int vxlan_fdb_append(struct vxlan_fdb *f,
 -                          union vxlan_addr *ip, __be16 port, __u32 vni,
 +                          union vxlan_addr *ip, __be16 port, __be32 vni,
                            __u32 ifindex, struct vxlan_rdst **rdp)
  {
        struct vxlan_rdst *rd;
        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (rd == NULL)
                return -ENOBUFS;
 +
 +      if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
 +              kfree(rd);
 +              return -ENOBUFS;
 +      }
 +
        rd->remote_ip = *ip;
        rd->remote_port = port;
        rd->remote_vni = vni;
  static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          unsigned int off,
                                          struct vxlanhdr *vh, size_t hdrlen,
 -                                        u32 data, struct gro_remcsum *grc,
 +                                        __be32 vni_field,
 +                                        struct gro_remcsum *grc,
                                          bool nopartial)
  {
        size_t start, offset;
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
  
 -      start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
 -      offset = start + ((data & VXLAN_RCO_UDP) ?
 -                        offsetof(struct udphdr, check) :
 -                        offsetof(struct tcphdr, check));
 +      start = vxlan_rco_start(vni_field);
 +      offset = start + vxlan_rco_offset(vni_field);
  
        vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
                                     start, offset, grc, nopartial);
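The GRO path above now derives the remote-checksum-offload start and offset through helpers instead of open-coded shifts, and keeps vx_vni in network byte order throughout. Sketches consistent with the masks used in this diff (illustrative; the real helpers live in include/net/vxlan.h):

static inline size_t vxlan_rco_start(__be32 vni_field)
{
	return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
}

static inline size_t vxlan_rco_offset(__be32 vni_field)
{
	return (vni_field & VXLAN_RCO_UDP) ?
	       offsetof(struct udphdr, check) :
	       offsetof(struct tcphdr, check);
}

The transmit side mirrors this with vxlan_compute_rco(), seen later in vxlan_build_skb().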
@@@ -561,7 -549,7 +561,7 @@@ static struct sk_buff **vxlan_gro_recei
        int flush = 1;
        struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
                                             udp_offloads);
 -      u32 flags;
 +      __be32 flags;
        struct gro_remcsum grc;
  
        skb_gro_remcsum_init(&grc);
  
        skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
  
 -      flags = ntohl(vh->vx_flags);
 +      flags = vh->vx_flags;
  
        if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
                vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
 -                                     ntohl(vh->vx_vni), &grc,
 +                                     vh->vx_vni, &grc,
                                       !!(vs->flags &
                                          VXLAN_F_REMCSUM_NOPARTIAL));
  
@@@ -672,7 -660,7 +672,7 @@@ static void vxlan_notify_del_rx_port(st
  static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
 -                          __be16 port, __u32 vni, __u32 ifindex,
 +                          __be16 port, __be32 vni, __u32 ifindex,
                            __u8 ndm_flags)
  {
        struct vxlan_rdst *rd = NULL;
@@@ -761,10 -749,8 +761,10 @@@ static void vxlan_fdb_free(struct rcu_h
        struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
        struct vxlan_rdst *rd, *nd;
  
 -      list_for_each_entry_safe(rd, nd, &f->remotes, list)
 +      list_for_each_entry_safe(rd, nd, &f->remotes, list) {
 +              dst_cache_destroy(&rd->dst_cache);
                kfree(rd);
 +      }
        kfree(f);
  }
  
@@@ -781,8 -767,7 +781,8 @@@ static void vxlan_fdb_destroy(struct vx
  }
  
  static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 -                         union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
 +                         union vxlan_addr *ip, __be16 *port, __be32 *vni,
 +                         u32 *ifindex)
  {
        struct net *net = dev_net(vxlan->dev);
        int err;
        if (tb[NDA_VNI]) {
                if (nla_len(tb[NDA_VNI]) != sizeof(u32))
                        return -EINVAL;
 -              *vni = nla_get_u32(tb[NDA_VNI]);
 +              *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
        } else {
                *vni = vxlan->default_dst.remote_vni;
        }
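Throughout this diff the 24-bit VNI switches from a host-order u32 to a network-order __be32 held in the upper three bytes of vx_vni. Conversion sketches matching that layout (illustrative; the real helpers are in include/net/vxlan.h):

static inline __be32 vxlan_vni(__be32 vni_field)
{
#if defined(__BIG_ENDIAN)
	return (__force __be32)((__force u32)vni_field >> 8);
#else
	return (__force __be32)((__force u32)vni_field << 8);
#endif
}

static inline __be32 vxlan_vni_field(__be32 vni)
{
#if defined(__BIG_ENDIAN)
	return (__force __be32)((__force u32)vni << 8);
#else
	return (__force __be32)((__force u32)vni >> 8);
#endif
}

On little-endian machines the reserved low byte simply shifts out, which is why the call sites no longer need ntohl()/htonl() pairs.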
@@@ -845,8 -830,7 +845,8 @@@ static int vxlan_fdb_add(struct ndmsg *
        /* struct net *net = dev_net(vxlan->dev); */
        union vxlan_addr ip;
        __be16 port;
 -      u32 vni, ifindex;
 +      __be32 vni;
 +      u32 ifindex;
        int err;
  
        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@@ -883,8 -867,7 +883,8 @@@ static int vxlan_fdb_delete(struct ndms
        struct vxlan_rdst *rd = NULL;
        union vxlan_addr ip;
        __be16 port;
 -      u32 vni, ifindex;
 +      __be32 vni;
 +      u32 ifindex;
        int err;
  
        err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
@@@ -948,8 -931,10 +948,10 @@@ static int vxlan_fdb_dump(struct sk_buf
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGH,
                                                     NLM_F_MULTI, rd);
-                               if (err < 0)
+                               if (err < 0) {
+                                       cb->args[1] = err;
                                        goto out;
+                               }
  skip:
                                ++idx;
                        }
@@@ -1139,167 -1124,177 +1141,168 @@@ static int vxlan_igmp_leave(struct vxla
        return ret;
  }
  
 -static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
 -                                    size_t hdrlen, u32 data, bool nopartial)
 +static bool vxlan_remcsum(struct vxlanhdr *unparsed,
 +                        struct sk_buff *skb, u32 vxflags)
  {
        size_t start, offset, plen;
  
 -      if (skb->remcsum_offload)
 -              return vh;
 +      if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
 +              goto out;
  
 -      start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
 -      offset = start + ((data & VXLAN_RCO_UDP) ?
 -                        offsetof(struct udphdr, check) :
 -                        offsetof(struct tcphdr, check));
 +      start = vxlan_rco_start(unparsed->vx_vni);
 +      offset = start + vxlan_rco_offset(unparsed->vx_vni);
  
 -      plen = hdrlen + offset + sizeof(u16);
 +      plen = sizeof(struct vxlanhdr) + offset + sizeof(u16);
  
        if (!pskb_may_pull(skb, plen))
 -              return NULL;
 +              return false;
 +
 +      skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
 +                          !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
 +out:
 +      unparsed->vx_flags &= ~VXLAN_HF_RCO;
 +      unparsed->vx_vni &= VXLAN_VNI_MASK;
 +      return true;
 +}
 +
 +static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
 +                              struct sk_buff *skb, u32 vxflags,
 +                              struct vxlan_metadata *md)
 +{
 +      struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
 +      struct metadata_dst *tun_dst;
  
 -      vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 +      if (!(unparsed->vx_flags & VXLAN_HF_GBP))
 +              goto out;
  
 -      skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
 -                          nopartial);
 +      md->gbp = ntohs(gbp->policy_id);
  
 -      return vh;
 +      tun_dst = (struct metadata_dst *)skb_dst(skb);
-       if (tun_dst)
++      if (tun_dst) {
 +              tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
++              tun_dst->u.tun_info.options_len = sizeof(*md);
++      }
 +      if (gbp->dont_learn)
 +              md->gbp |= VXLAN_GBP_DONT_LEARN;
 +
 +      if (gbp->policy_applied)
 +              md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 +
 +      /* In flow-based mode, GBP is carried in dst_metadata */
 +      if (!(vxflags & VXLAN_F_COLLECT_METADATA))
 +              skb->mark = md->gbp;
 +out:
 +      unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
  }
  
 -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
 -                    struct vxlan_metadata *md, u32 vni,
 -                    struct metadata_dst *tun_dst)
 +static bool vxlan_set_mac(struct vxlan_dev *vxlan,
 +                        struct vxlan_sock *vs,
 +                        struct sk_buff *skb)
  {
 -      struct iphdr *oip = NULL;
 -      struct ipv6hdr *oip6 = NULL;
 -      struct vxlan_dev *vxlan;
 -      struct pcpu_sw_netstats *stats;
        union vxlan_addr saddr;
 -      int err = 0;
 -
 -      /* For flow based devices, map all packets to VNI 0 */
 -      if (vs->flags & VXLAN_F_COLLECT_METADATA)
 -              vni = 0;
 -
 -      /* Is this VNI defined? */
 -      vxlan = vxlan_vs_find_vni(vs, vni);
 -      if (!vxlan)
 -              goto drop;
  
        skb_reset_mac_header(skb);
 -      skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
        skb->protocol = eth_type_trans(skb, vxlan->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
  
        /* Ignore packet loops (and multicast echo) */
        if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
 -              goto drop;
 +              return false;
  
 -      /* Get data from the outer IP header */
 +      /* Get address from the outer IP header */
        if (vxlan_get_sk_family(vs) == AF_INET) {
 -              oip = ip_hdr(skb);
 -              saddr.sin.sin_addr.s_addr = oip->saddr;
 +              saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
                saddr.sa.sa_family = AF_INET;
  #if IS_ENABLED(CONFIG_IPV6)
        } else {
 -              oip6 = ipv6_hdr(skb);
 -              saddr.sin6.sin6_addr = oip6->saddr;
 +              saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
                saddr.sa.sa_family = AF_INET6;
  #endif
        }
  
 -      if (tun_dst) {
 -              skb_dst_set(skb, (struct dst_entry *)tun_dst);
 -              tun_dst = NULL;
 -      }
 -
        if ((vxlan->flags & VXLAN_F_LEARN) &&
            vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
 -              goto drop;
 -
 -      skb_reset_network_header(skb);
 -      /* In flow-based mode, GBP is carried in dst_metadata */
 -      if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
 -              skb->mark = md->gbp;
 -
 -      if (oip6)
 -              err = IP6_ECN_decapsulate(oip6, skb);
 -      if (oip)
 -              err = IP_ECN_decapsulate(oip, skb);
 -
 -      if (unlikely(err)) {
 -              if (log_ecn_error) {
 -                      if (oip6)
 -                              net_info_ratelimited("non-ECT from %pI6\n",
 -                                                   &oip6->saddr);
 -                      if (oip)
 -                              net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 -                                                   &oip->saddr, oip->tos);
 -              }
 -              if (err > 1) {
 -                      ++vxlan->dev->stats.rx_frame_errors;
 -                      ++vxlan->dev->stats.rx_errors;
 -                      goto drop;
 -              }
 -      }
 +              return false;
  
 -      stats = this_cpu_ptr(vxlan->dev->tstats);
 -      u64_stats_update_begin(&stats->syncp);
 -      stats->rx_packets++;
 -      stats->rx_bytes += skb->len;
 -      u64_stats_update_end(&stats->syncp);
 +      return true;
 +}
  
 -      gro_cells_receive(&vxlan->gro_cells, skb);
 +static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
 +                                struct sk_buff *skb)
 +{
 +      int err = 0;
  
 -      return;
 -drop:
 -      if (tun_dst)
 -              dst_release((struct dst_entry *)tun_dst);
 +      if (vxlan_get_sk_family(vs) == AF_INET)
 +              err = IP_ECN_decapsulate(oiph, skb);
 +#if IS_ENABLED(CONFIG_IPV6)
 +      else
 +              err = IP6_ECN_decapsulate(oiph, skb);
 +#endif
  
 -      /* Consume bad packet */
 -      kfree_skb(skb);
 +      if (unlikely(err) && log_ecn_error) {
 +              if (vxlan_get_sk_family(vs) == AF_INET)
 +                      net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 +                                           &((struct iphdr *)oiph)->saddr,
 +                                           ((struct iphdr *)oiph)->tos);
 +              else
 +                      net_info_ratelimited("non-ECT from %pI6\n",
 +                                           &((struct ipv6hdr *)oiph)->saddr);
 +      }
 +      return err <= 1;
  }
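vxlan_ecn_decapsulate() above leans on the return convention of INET_ECN_decapsulate() (include/net/inet_ecn.h; summarized from memory):

/*
 * 0 - nothing to do, or outer CE successfully propagated to the inner header
 * 1 - outer header was ECT but the inner is not-ECT: logged, still deliverable
 * 2 - outer CE with a non-ECT inner header: the packet must be dropped
 */

Hence "err <= 1" means "keep the packet", and the caller in vxlan_rcv() bumps rx_frame_errors only when this returns false.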
  
  /* Callback from net/ipv4/udp.c to receive packets */
 -static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 +static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
  {
 -      struct metadata_dst *tun_dst = NULL;
 +      struct pcpu_sw_netstats *stats;
 +      struct vxlan_dev *vxlan;
        struct vxlan_sock *vs;
 -      struct vxlanhdr *vxh;
 -      u32 flags, vni;
 +      struct vxlanhdr unparsed;
        struct vxlan_metadata _md;
        struct vxlan_metadata *md = &_md;
 +      void *oiph;
  
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
 -              goto error;
 -
 -      vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 -      flags = ntohl(vxh->vx_flags);
 -      vni = ntohl(vxh->vx_vni);
 +              return 1;
  
 -      if (flags & VXLAN_HF_VNI) {
 -              flags &= ~VXLAN_HF_VNI;
 -      } else {
 -              /* VNI flag always required to be set */
 -              goto bad_flags;
 +      unparsed = *vxlan_hdr(skb);
 +      /* VNI flag always required to be set */
 +      if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
 +              netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
 +                         ntohl(vxlan_hdr(skb)->vx_flags),
 +                         ntohl(vxlan_hdr(skb)->vx_vni));
 +              /* Return non vxlan pkt */
 +              return 1;
        }
 -
 -      if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
 -              goto drop;
 -      vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 +      unparsed.vx_flags &= ~VXLAN_HF_VNI;
 +      unparsed.vx_vni &= ~VXLAN_VNI_MASK;
  
        vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
  
 -      if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
 -              vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
 -                                  !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
 -              if (!vxh)
 -                      goto drop;
 +      vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
 +      if (!vxlan)
 +              goto drop;
  
 -              flags &= ~VXLAN_HF_RCO;
 -              vni &= VXLAN_VNI_MASK;
 -      }
 +      if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
 +                               !net_eq(vxlan->net, dev_net(vxlan->dev))))
 +              goto drop;
  
        if (vxlan_collect_metadata(vs)) {
 +              __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
 +              struct metadata_dst *tun_dst;
 +
                tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
 -                                       cpu_to_be64(vni >> 8), sizeof(*md));
 +                                       vxlan_vni_to_tun_id(vni), sizeof(*md));
  
                if (!tun_dst)
                        goto drop;
  
                md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 +
 +              skb_dst_set(skb, (struct dst_entry *)tun_dst);
        } else {
                memset(md, 0, sizeof(*md));
        }
        /* For backwards compatibility, only allow reserved fields to be
         * used by VXLAN extensions if explicitly requested.
         */
 -      if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
 -              struct vxlanhdr_gbp *gbp;
 -
 -              gbp = (struct vxlanhdr_gbp *)vxh;
 -              md->gbp = ntohs(gbp->policy_id);
 -
 -              if (tun_dst) {
 -                      tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
 -                      tun_dst->u.tun_info.options_len = sizeof(*md);
 -              }
 -
 -              if (gbp->dont_learn)
 -                      md->gbp |= VXLAN_GBP_DONT_LEARN;
 -
 -              if (gbp->policy_applied)
 -                      md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 -
 -              flags &= ~VXLAN_GBP_USED_BITS;
 -      }
 +      if (vs->flags & VXLAN_F_REMCSUM_RX)
 +              if (!vxlan_remcsum(&unparsed, skb, vs->flags))
 +                      goto drop;
 +      if (vs->flags & VXLAN_F_GBP)
 +              vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
  
 -      if (flags || vni & ~VXLAN_VNI_MASK) {
 +      if (unparsed.vx_flags || unparsed.vx_vni) {
                /* If there are any unprocessed flags remaining treat
                 * this as a malformed packet. This behavior diverges from
                 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
                 * fields are to be ignored. The approach here maintains
                 * compatibility with previous stack code, and also is more
                 * robust and provides a little more security in adding
                 * extensions to VXLAN.
                 */
 +              goto drop;
 +      }
  
 -              goto bad_flags;
 +      if (!vxlan_set_mac(vxlan, vs, skb))
 +              goto drop;
 +
 +      oiph = skb_network_header(skb);
 +      skb_reset_network_header(skb);
 +
 +      if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
 +              ++vxlan->dev->stats.rx_frame_errors;
 +              ++vxlan->dev->stats.rx_errors;
 +              goto drop;
        }
  
 -      vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
 +      stats = this_cpu_ptr(vxlan->dev->tstats);
 +      u64_stats_update_begin(&stats->syncp);
 +      stats->rx_packets++;
 +      stats->rx_bytes += skb->len;
 +      u64_stats_update_end(&stats->syncp);
 +
 +      gro_cells_receive(&vxlan->gro_cells, skb);
        return 0;
  
  drop:
        /* Consume bad packet */
        kfree_skb(skb);
        return 0;
 -
 -bad_flags:
 -      netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
 -                 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
 -
 -error:
 -      if (tun_dst)
 -              dst_release((struct dst_entry *)tun_dst);
 -
 -      /* Return non vxlan pkt */
 -      return 1;
  }
  
  static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@@ -1460,7 -1463,7 +1463,7 @@@ static struct sk_buff *vxlan_na_create(
        reply->dev = dev;
        skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
        skb_push(reply, sizeof(struct ethhdr));
 -      skb_set_mac_header(reply, 0);
 +      skb_reset_mac_header(reply);
  
        ns = (struct nd_msg *)skb_transport_header(request);
  
        reply->protocol = htons(ETH_P_IPV6);
  
        skb_pull(reply, sizeof(struct ethhdr));
 -      skb_set_network_header(reply, 0);
 +      skb_reset_network_header(reply);
        skb_put(reply, sizeof(struct ipv6hdr));
  
        /* IPv6 header */
        pip6->saddr = *(struct in6_addr *)n->primary_key;
  
        skb_pull(reply, sizeof(struct ipv6hdr));
 -      skb_set_transport_header(reply, 0);
 +      skb_reset_transport_header(reply);
  
        na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
  
@@@ -1674,7 -1677,7 +1677,7 @@@ static void vxlan_build_gbp_hdr(struct 
                return;
  
        gbp = (struct vxlanhdr_gbp *)vxh;
 -      vxh->vx_flags |= htonl(VXLAN_HF_GBP);
 +      vxh->vx_flags |= VXLAN_HF_GBP;
  
        if (md->gbp & VXLAN_GBP_DONT_LEARN)
                gbp->dont_learn = 1;
        gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
  }
  
 -#if IS_ENABLED(CONFIG_IPV6)
 -static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
 -                         struct sk_buff *skb,
 -                         struct net_device *dev, struct in6_addr *saddr,
 -                         struct in6_addr *daddr, __u8 prio, __u8 ttl,
 -                         __be16 src_port, __be16 dst_port, __be32 vni,
 -                         struct vxlan_metadata *md, bool xnet, u32 vxflags)
 +static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
 +                         int iphdr_len, __be32 vni,
 +                         struct vxlan_metadata *md, u32 vxflags,
 +                         bool udp_sum)
  {
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
 -      bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
        int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 -      u16 hdrlen = sizeof(struct vxlanhdr);
  
        if ((vxflags & VXLAN_F_REMCSUM_TX) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                if (csum_start <= VXLAN_MAX_REMCSUM_START &&
                    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
                    (skb->csum_offset == offsetof(struct udphdr, check) ||
 -                   skb->csum_offset == offsetof(struct tcphdr, check))) {
 -                      udp_sum = false;
 +                   skb->csum_offset == offsetof(struct tcphdr, check)))
                        type |= SKB_GSO_TUNNEL_REMCSUM;
 -              }
        }
  
 -      skb_scrub_packet(skb, xnet);
 -
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
 -                      + VXLAN_HLEN + sizeof(struct ipv6hdr)
 +                      + VXLAN_HLEN + iphdr_len
                        + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
  
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
                kfree_skb(skb);
 -              goto err;
 +              return err;
        }
  
        skb = vlan_hwaccel_push_inside(skb);
 -      if (WARN_ON(!skb)) {
 -              err = -ENOMEM;
 -              goto err;
 -      }
 +      if (WARN_ON(!skb))
 +              return -ENOMEM;
  
 -      skb = iptunnel_handle_offloads(skb, udp_sum, type);
 -      if (IS_ERR(skb)) {
 -              err = -EINVAL;
 -              goto err;
 -      }
 +      skb = iptunnel_handle_offloads(skb, type);
 +      if (IS_ERR(skb))
 +              return PTR_ERR(skb);
  
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 -      vxh->vx_flags = htonl(VXLAN_HF_VNI);
 -      vxh->vx_vni = vni;
 +      vxh->vx_flags = VXLAN_HF_VNI;
 +      vxh->vx_vni = vxlan_vni_field(vni);
  
        if (type & SKB_GSO_TUNNEL_REMCSUM) {
 -              u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
 -                         VXLAN_RCO_SHIFT;
 -
 -              if (skb->csum_offset == offsetof(struct udphdr, check))
 -                      data |= VXLAN_RCO_UDP;
 +              unsigned int start;
  
 -              vxh->vx_vni |= htonl(data);
 -              vxh->vx_flags |= htonl(VXLAN_HF_RCO);
 +              start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
 +              vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
 +              vxh->vx_flags |= VXLAN_HF_RCO;
  
                if (!skb_is_gso(skb)) {
                        skb->ip_summed = CHECKSUM_NONE;
                vxlan_build_gbp_hdr(vxh, vxflags, md);
  
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 -
 -      udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
 -                           ttl, src_port, dst_port,
 -                           !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
        return 0;
 -err:
 -      dst_release(dst);
 -      return err;
  }
 -#endif
  
 -static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
 -                        __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
 -                        __be16 src_port, __be16 dst_port, __be32 vni,
 -                        struct vxlan_metadata *md, bool xnet, u32 vxflags)
 +static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
 +                                    struct sk_buff *skb, int oif, u8 tos,
 +                                    __be32 daddr, __be32 *saddr,
 +                                    struct dst_cache *dst_cache,
 +                                    struct ip_tunnel_info *info)
  {
 -      struct vxlanhdr *vxh;
 -      int min_headroom;
 -      int err;
 -      bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
 -      int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 -      u16 hdrlen = sizeof(struct vxlanhdr);
 -
 -      if ((vxflags & VXLAN_F_REMCSUM_TX) &&
 -          skb->ip_summed == CHECKSUM_PARTIAL) {
 -              int csum_start = skb_checksum_start_offset(skb);
 -
 -              if (csum_start <= VXLAN_MAX_REMCSUM_START &&
 -                  !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
 -                  (skb->csum_offset == offsetof(struct udphdr, check) ||
 -                   skb->csum_offset == offsetof(struct tcphdr, check))) {
 -                      udp_sum = false;
 -                      type |= SKB_GSO_TUNNEL_REMCSUM;
 -              }
 -      }
 -
 -      min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 -                      + VXLAN_HLEN + sizeof(struct iphdr)
 -                      + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 +      struct rtable *rt = NULL;
 +      bool use_cache = false;
 +      struct flowi4 fl4;
  
 -      /* Need space for new headers (invalidates iph ptr) */
 -      err = skb_cow_head(skb, min_headroom);
 -      if (unlikely(err)) {
 -              kfree_skb(skb);
 -              return err;
 +      /* when the ip_tunnel_info is available, the tos used for lookup is
 +       * packet independent, so we can use the cache
 +       */
 +      if (!skb->mark && (!tos || info)) {
 +              use_cache = true;
 +              rt = dst_cache_get_ip4(dst_cache, saddr);
 +              if (rt)
 +                      return rt;
        }
  
 -      skb = vlan_hwaccel_push_inside(skb);
 -      if (WARN_ON(!skb))
 -              return -ENOMEM;
 -
 -      skb = iptunnel_handle_offloads(skb, udp_sum, type);
 -      if (IS_ERR(skb))
 -              return PTR_ERR(skb);
 -
 -      vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 -      vxh->vx_flags = htonl(VXLAN_HF_VNI);
 -      vxh->vx_vni = vni;
 -
 -      if (type & SKB_GSO_TUNNEL_REMCSUM) {
 -              u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
 -                         VXLAN_RCO_SHIFT;
 -
 -              if (skb->csum_offset == offsetof(struct udphdr, check))
 -                      data |= VXLAN_RCO_UDP;
 -
 -              vxh->vx_vni |= htonl(data);
 -              vxh->vx_flags |= htonl(VXLAN_HF_RCO);
 +      memset(&fl4, 0, sizeof(fl4));
 +      fl4.flowi4_oif = oif;
 +      fl4.flowi4_tos = RT_TOS(tos);
 +      fl4.flowi4_mark = skb->mark;
 +      fl4.flowi4_proto = IPPROTO_UDP;
 +      fl4.daddr = daddr;
 +      fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
  
 -              if (!skb_is_gso(skb)) {
 -                      skb->ip_summed = CHECKSUM_NONE;
 -                      skb->encapsulation = 0;
 -              }
 +      rt = ip_route_output_key(vxlan->net, &fl4);
 +      if (!IS_ERR(rt)) {
 +              *saddr = fl4.saddr;
 +              if (use_cache)
 +                      dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
        }
 -
 -      if (vxflags & VXLAN_F_GBP)
 -              vxlan_build_gbp_hdr(vxh, vxflags, md);
 -
 -      skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 -
 -      udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df,
 -                          src_port, dst_port, xnet,
 -                          !(vxflags & VXLAN_F_UDP_CSUM));
 -      return 0;
 +      return rt;
  }
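vxlan_get_route() above consults the new per-destination route cache only when the lookup key cannot vary per packet. The rule it encodes, pulled out as a predicate for clarity (hypothetical helper, not part of the patch):

static bool vxlan_route_cache_usable(const struct sk_buff *skb, u8 tos,
				     const struct ip_tunnel_info *info)
{
	/* skb->mark and a per-packet tos both change the routing key */
	return !skb->mark && (!tos || info);
}

The IPv6 variant below applies the same idea but tests only skb->mark, since its flow key does not include tos.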
  
  #if IS_ENABLED(CONFIG_IPV6)
  static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
                                          struct sk_buff *skb, int oif,
                                          const struct in6_addr *daddr,
 -                                        struct in6_addr *saddr)
 +                                        struct in6_addr *saddr,
 +                                        struct dst_cache *dst_cache)
  {
        struct dst_entry *ndst;
        struct flowi6 fl6;
        int err;
  
 +      if (!skb->mark) {
 +              ndst = dst_cache_get_ip6(dst_cache, saddr);
 +              if (ndst)
 +                      return ndst;
 +      }
 +
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.daddr = *daddr;
                return ERR_PTR(err);
  
        *saddr = fl6.saddr;
 +      if (!skb->mark)
 +              dst_cache_set_ip6(dst_cache, ndst, saddr);
        return ndst;
  }
  #endif
@@@ -1871,24 -1927,22 +1874,24 @@@ static void vxlan_encap_bypass(struct s
  static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                           struct vxlan_rdst *rdst, bool did_rsc)
  {
 +      struct dst_cache *dst_cache;
        struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
 -      struct flowi4 fl4;
        union vxlan_addr *dst;
        union vxlan_addr remote_ip;
        struct vxlan_metadata _md;
        struct vxlan_metadata *md = &_md;
        __be16 src_port = 0, dst_port;
 -      u32 vni;
 +      __be32 vni;
        __be16 df = 0;
        __u8 tos, ttl;
        int err;
        u32 flags = vxlan->flags;
 +      bool udp_sum = false;
 +      bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
  
        info = skb_tunnel_info(skb);
  
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
                vni = rdst->remote_vni;
                dst = &rdst->remote_ip;
 +              dst_cache = &rdst->dst_cache;
        } else {
                if (!info) {
                        WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
                        goto drop;
                }
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
 -              vni = be64_to_cpu(info->key.tun_id);
 +              vni = vxlan_tun_id_to_vni(info->key.tun_id);
                remote_ip.sa.sa_family = ip_tunnel_info_af(info);
                if (remote_ip.sa.sa_family == AF_INET)
                        remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
                else
                        remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
                dst = &remote_ip;
 +              dst_cache = &info->dst_cache;
        }
  
        if (vxlan_addr_any(dst)) {
        if (info) {
                ttl = info->key.ttl;
                tos = info->key.tos;
 +              udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
  
                if (info->options_len)
                        md = ip_tunnel_info_opts(info);
        }
  
        if (dst->sa.sa_family == AF_INET) {
 +              __be32 saddr;
 +
                if (!vxlan->vn4_sock)
                        goto drop;
                sk = vxlan->vn4_sock->sock->sk;
  
 -              if (info) {
 -                      if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
 -                              df = htons(IP_DF);
 -
 -                      if (info->key.tun_flags & TUNNEL_CSUM)
 -                              flags |= VXLAN_F_UDP_CSUM;
 -                      else
 -                              flags &= ~VXLAN_F_UDP_CSUM;
 -              }
 -
 -              memset(&fl4, 0, sizeof(fl4));
 -              fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
 -              fl4.flowi4_tos = RT_TOS(tos);
 -              fl4.flowi4_mark = skb->mark;
 -              fl4.flowi4_proto = IPPROTO_UDP;
 -              fl4.daddr = dst->sin.sin_addr.s_addr;
 -              fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
 -
 -              rt = ip_route_output_key(vxlan->net, &fl4);
 +              rt = vxlan_get_route(vxlan, skb,
 +                                   rdst ? rdst->remote_ifindex : 0, tos,
 +                                   dst->sin.sin_addr.s_addr, &saddr,
 +                                   dst_cache, info);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
                                   &dst->sin.sin_addr.s_addr);
                        return;
                }
  
 +              if (!info)
 +                      udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
 +              else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
 +                      df = htons(IP_DF);
 +
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
 -              err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
 -                                   dst->sin.sin_addr.s_addr, tos, ttl, df,
 -                                   src_port, dst_port, htonl(vni << 8), md,
 -                                   !net_eq(vxlan->net, dev_net(vxlan->dev)),
 -                                   flags);
 -              if (err < 0) {
 -                      /* skb is already freed. */
 -                      skb = NULL;
 -                      goto rt_tx_error;
 -              }
 +              err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
 +                                    vni, md, flags, udp_sum);
 +              if (err < 0)
 +                      goto xmit_tx_error;
 +
 +              udp_tunnel_xmit_skb(rt, sk, skb, saddr,
 +                                  dst->sin.sin_addr.s_addr, tos, ttl, df,
 +                                  src_port, dst_port, xnet, !udp_sum);
  #if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct dst_entry *ndst;
  
                ndst = vxlan6_get_route(vxlan, skb,
                                        rdst ? rdst->remote_ifindex : 0,
 -                                      &dst->sin6.sin6_addr, &saddr);
 +                                      &dst->sin6.sin6_addr, &saddr,
 +                                      dst_cache);
                if (IS_ERR(ndst)) {
                        netdev_dbg(dev, "no route to %pI6\n",
                                   &dst->sin6.sin6_addr);
                        return;
                }
  
 -              if (info) {
 -                      if (info->key.tun_flags & TUNNEL_CSUM)
 -                              flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
 -                      else
 -                              flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
 -              }
 +              if (!info)
 +                      udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
  
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
 -              err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
 -                                    0, ttl, src_port, dst_port, htonl(vni << 8), md,
 -                                    !net_eq(vxlan->net, dev_net(vxlan->dev)),
 -                                    flags);
 +              skb_scrub_packet(skb, xnet);
 +              err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
 +                                    vni, md, flags, udp_sum);
 +              if (err < 0) {
 +                      dst_release(ndst);
 +                      return;
 +              }
 +              udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
 +                                   &saddr, &dst->sin6.sin6_addr,
 +                                   0, ttl, src_port, dst_port, !udp_sum);
  #endif
        }
  
@@@ -2070,9 -2128,6 +2073,9 @@@ drop
        dev->stats.tx_dropped++;
        goto tx_free;
  
 +xmit_tx_error:
 +      /* skb is already freed. */
 +      skb = NULL;
  rt_tx_error:
        ip_rt_put(rt);
  tx_error:
@@@ -2212,7 -2267,7 +2215,7 @@@ static void vxlan_cleanup(unsigned lon
  static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
  {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 -      __u32 vni = vxlan->default_dst.remote_vni;
 +      __be32 vni = vxlan->default_dst.remote_vni;
  
        spin_lock(&vn->sock_lock);
        hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
@@@ -2355,6 -2410,31 +2358,6 @@@ static int vxlan_change_mtu(struct net_
        return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
  }
  
 -static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
 -                              struct ip_tunnel_info *info,
 -                              __be16 sport, __be16 dport)
 -{
 -      struct vxlan_dev *vxlan = netdev_priv(dev);
 -      struct rtable *rt;
 -      struct flowi4 fl4;
 -
 -      memset(&fl4, 0, sizeof(fl4));
 -      fl4.flowi4_tos = RT_TOS(info->key.tos);
 -      fl4.flowi4_mark = skb->mark;
 -      fl4.flowi4_proto = IPPROTO_UDP;
 -      fl4.daddr = info->key.u.ipv4.dst;
 -
 -      rt = ip_route_output_key(vxlan->net, &fl4);
 -      if (IS_ERR(rt))
 -              return PTR_ERR(rt);
 -      ip_rt_put(rt);
 -
 -      info->key.u.ipv4.src = fl4.saddr;
 -      info->key.tp_src = sport;
 -      info->key.tp_dst = dport;
 -      return 0;
 -}
 -
  static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
  {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
  
        if (ip_tunnel_info_af(info) == AF_INET) {
 +              struct rtable *rt;
 +
                if (!vxlan->vn4_sock)
                        return -EINVAL;
 -              return egress_ipv4_tun_info(dev, skb, info, sport, dport);
 +              rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
 +                                   info->key.u.ipv4.dst,
 +                                   &info->key.u.ipv4.src, NULL, info);
 +              if (IS_ERR(rt))
 +                      return PTR_ERR(rt);
 +              ip_rt_put(rt);
        } else {
  #if IS_ENABLED(CONFIG_IPV6)
                struct dst_entry *ndst;
                        return -EINVAL;
                ndst = vxlan6_get_route(vxlan, skb, 0,
                                        &info->key.u.ipv6.dst,
 -                                      &info->key.u.ipv6.src);
 +                                      &info->key.u.ipv6.src, NULL);
                if (IS_ERR(ndst))
                        return PTR_ERR(ndst);
                dst_release(ndst);
 -
 -              info->key.tp_src = sport;
 -              info->key.tp_dst = dport;
  #else /* !CONFIG_IPV6 */
                return -EPFNOSUPPORT;
  #endif
        }
 +      info->key.tp_src = sport;
 +      info->key.tp_dst = dport;
        return 0;
  }
  
@@@ -2645,7 -2719,7 +2648,7 @@@ static struct vxlan_sock *vxlan_socket_
        /* Mark socket as an encapsulation socket. */
        tunnel_cfg.sk_user_data = vs;
        tunnel_cfg.encap_type = 1;
 -      tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
 +      tunnel_cfg.encap_rcv = vxlan_rcv;
        tunnel_cfg.encap_destroy = NULL;
  
        setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@@ -2847,7 -2921,7 +2850,7 @@@ static int vxlan_newlink(struct net *sr
        memset(&conf, 0, sizeof(conf));
  
        if (data[IFLA_VXLAN_ID])
 -              conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
 +              conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
  
        if (data[IFLA_VXLAN_GROUP]) {
                conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
        if (data[IFLA_VXLAN_PORT])
                conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
  
 -      if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
 -              conf.flags |= VXLAN_F_UDP_CSUM;
 +      if (data[IFLA_VXLAN_UDP_CSUM] &&
 +          !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
 +              conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
  
        if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
                break;
  
        case -EEXIST:
 -              pr_info("duplicate VNI %u\n", conf.vni);
 +              pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
                break;
        }
  
@@@ -3010,7 -3083,7 +3013,7 @@@ static int vxlan_fill_info(struct sk_bu
                .high = htons(vxlan->cfg.port_max),
        };
  
 -      if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
 +      if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
                goto nla_put_failure;
  
        if (!vxlan_addr_any(&dst->remote_ip)) {
            nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
            nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
 -                      !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
 +                      !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
                        !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
index 070e2af05ca25bc5545373e3738c8c30b67cd283,0ccc697fef76cf25c94443c4cde9d0fb1185df05..62ae43d2ab7b32f07bb37b20de11198f53010b0c
@@@ -7,7 -7,6 +7,7 @@@
   *
   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 + * Copyright(c) 2016 Intel Deutschland GmbH
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License as
@@@ -108,25 -107,7 +108,25 @@@ static int iwl_send_tx_ant_cfg(struct i
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
  }
  
- static void iwl_free_fw_paging(struct iwl_mvm *mvm)
 +static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 +{
 +      int i;
 +      struct iwl_rss_config_cmd cmd = {
 +              .flags = cpu_to_le32(IWL_RSS_ENABLE),
 +              .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
 +                           IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
 +                           IWL_RSS_HASH_TYPE_IPV6_TCP |
 +                           IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 +      };
 +
 +      for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
 +              cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
 +      memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
 +
 +      return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 +}
 +
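iwl_send_rss_cfg_cmd() above fills the RSS indirection table round-robin, spreading hash buckets evenly over the available RX queues; with num_rx_queues = 4 the table reads 0,1,2,3,0,1,2,3,... A tiny standalone illustration of the fill (sizes illustrative):

	u8 table[16];
	unsigned int i, nqueues = 4;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		table[i] = i % nqueues;		/* 0,1,2,3,0,1,2,3,... */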
+ void iwl_free_fw_paging(struct iwl_mvm *mvm)
  {
        int i;
  
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
        }
        kfree(mvm->trans->paging_download_buf);
+       mvm->trans->paging_download_buf = NULL;
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
  }
  
@@@ -913,16 -896,6 +915,16 @@@ int iwl_mvm_up(struct iwl_mvm *mvm
        if (ret)
                goto error;
  
 +      /* Init RSS configuration */
 +      if (iwl_mvm_has_new_rx_api(mvm)) {
 +              ret = iwl_send_rss_cfg_cmd(mvm);
 +              if (ret) {
 +                      IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
 +                              ret);
 +                      goto error;
 +              }
 +      }
 +
        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
index ebe37bb0ce4c4f42643bc6443d4b55246de132f3,ff7c6df9f9418ebac294709f63c21911459cfac1..cc279e0961fad467f4ee1ef1fb80153ff4c784ba
@@@ -346,9 -346,8 +346,9 @@@ struct iwl_mvm_vif_bf_data 
   * @pm_enabled: indicates whether MAC power management is allowed
   * @monitor_active: indicates that monitor context is configured, and that the
   *    interface should get quota etc.
 - * @low_latency: indicates that this interface is in low-latency mode
 - *    (VMACLowLatencyMode)
 + * @low_latency_traffic: indicates low latency traffic was detected
 + * @low_latency_dbgfs: low latency mode set from debugfs
 + * @low_latency_vcmd: low latency mode set from vendor command
   * @ps_disabled: indicates that this interface requires PS to be disabled
   * @queue_params: QoS params for this MAC
   * @bcast_sta: station used for broadcast packets. Used by the following
@@@ -376,7 -375,7 +376,7 @@@ struct iwl_mvm_vif 
        bool ap_ibss_active;
        bool pm_enabled;
        bool monitor_active;
 -      bool low_latency;
 +      bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd;
        bool ps_disabled;
        struct iwl_mvm_vif_bf_data bf_data;
  
        struct iwl_dbgfs_pm dbgfs_pm;
        struct iwl_dbgfs_bf dbgfs_bf;
        struct iwl_mac_power_cmd mac_pwr_cmd;
 +      int dbgfs_quota_min;
  #endif
  
        enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
@@@ -647,7 -645,6 +647,7 @@@ struct iwl_mvm 
        atomic_t pending_frames[IWL_MVM_STATION_COUNT];
        u32 tfd_drained[IWL_MVM_STATION_COUNT];
        u8 rx_ba_sessions;
 +      u32 secret_key[IWL_RSS_HASH_KEY_CNT];
  
        /* configured by mac80211 */
        u32 rts_threshold;
  
        u32 ciphers[6];
        struct iwl_mvm_tof_data tof_data;
 +
 +      /*
 +       * Drop beacons from other APs in AP mode when there are no connected
 +       * clients.
 +       */
 +      bool drop_bcn_ap_mode;
  };
  
  /* Extract MVM priv from op_mode and _hw */
@@@ -1014,18 -1005,10 +1014,18 @@@ static inline bool iwl_mvm_is_mplut_sup
                IWL_MVM_BT_COEX_MPLUT;
  }
  
 +static inline
 +bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
 +{
 +      return fw_has_capa(&mvm->fw->ucode_capa,
 +                         IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
 +              IWL_MVM_P2P_UAPSD_STANDALONE;
 +}
 +
  static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
  {
 -      /* firmware flag isn't defined yet */
 -      return false;
 +      return fw_has_capa(&mvm->fw->ucode_capa,
 +                         IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
  }
  
  extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@@ -1201,8 -1184,6 +1201,8 @@@ void iwl_mvm_rx_beacon_notif(struct iwl
                             struct iwl_rx_cmd_buffer *rxb);
  void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                     struct iwl_rx_cmd_buffer *rxb);
 +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 +                                  struct iwl_rx_cmd_buffer *rxb);
  void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
  unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@@ -1244,6 -1225,9 +1244,9 @@@ void iwl_mvm_rx_umac_scan_complete_noti
  void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                                              struct iwl_rx_cmd_buffer *rxb);
  
+ /* Paging */
+ void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
  /* MVM debugfs */
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@@ -1436,9 -1420,8 +1439,9 @@@ static inline bool iwl_mvm_vif_low_late
         * binding, so this has no real impact. For now, just return
         * the current desired low-latency state.
         */
 -
 -      return mvmvif->low_latency;
 +      return mvmvif->low_latency_dbgfs ||
 +             mvmvif->low_latency_traffic ||
 +             mvmvif->low_latency_vcmd;
  }
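
The rework above replaces one cached low_latency bool with three independently owned flags OR'd at query time, so the traffic detector, debugfs, and the vendor command can each toggle their own request without stomping on the others. The pattern in isolation, as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

struct vif_state {	/* trimmed-down stand-in for iwl_mvm_vif */
	bool low_latency_traffic;
	bool low_latency_dbgfs;
	bool low_latency_vcmd;
};

static bool vif_low_latency(const struct vif_state *v)
{
	/* any single requester keeps low-latency mode on */
	return v->low_latency_traffic || v->low_latency_dbgfs ||
	       v->low_latency_vcmd;
}

int main(void)
{
	struct vif_state v = { .low_latency_dbgfs = true };

	printf("low latency: %d\n", vif_low_latency(&v));
	return 0;
}
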
  
  /* hw scheduler queue config */
@@@ -1501,7 -1484,7 +1504,7 @@@ void iwl_mvm_tt_handler(struct iwl_mvm 
  void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
  void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
  void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 -int iwl_mvm_get_temp(struct iwl_mvm *mvm);
 +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
  
  /* Location Aware Regulatory */
  struct iwl_mcc_update_resp *
index 09a94a5efb61111d150fa1616e21467a922485d9,e80be9a595207bcf962551cc2bde42d14b898a03..fd8f4a80ddfc94d164f991bc8a0d94a3a3949336
@@@ -33,7 -33,6 +33,7 @@@
   *
   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 + * Copyright(c) 2016 Intel Deutschland GmbH
   * All rights reserved.
   *
   * Redistribution and use in source and binary forms, with or without
@@@ -268,8 -267,6 +268,8 @@@ static const struct iwl_rx_handlers iwl
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
        RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 +      RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
 +                     iwl_mvm_rx_stored_beacon_notif, false),
  
  };
  #undef RX_HANDLER
@@@ -347,7 -344,6 +347,7 @@@ static const struct iwl_hcmd_names iwl_
        HCMD_NAME(MAC_PM_POWER_TABLE),
        HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        HCMD_NAME(MFUART_LOAD_NOTIFICATION),
 +      HCMD_NAME(RSS_CONFIG_CMD),
        HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
        HCMD_NAME(REPLY_RX_PHY_CMD),
        HCMD_NAME(REPLY_RX_MPDU_CMD),
@@@ -390,20 -386,13 +390,20 @@@ static const struct iwl_hcmd_names iwl_
        HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
  };
  
 +/* Please keep this array *SORTED* by hex value.
 + * Access is done through binary search.
 + */
 +static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
 +      HCMD_NAME(STORED_BEACON_NTF),
 +};
 +
  static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
 +      [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
  };
  
 -
  /* this forward declaration can avoid to export the function */
  static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
  static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
@@@ -492,7 -481,6 +492,7 @@@ iwl_op_mode_mvm_start(struct iwl_trans 
        }
        mvm->sf_state = SF_UNINIT;
        mvm->cur_ucode = IWL_UCODE_INIT;
 +      mvm->drop_bcn_ap_mode = true;
  
        mutex_init(&mvm->mutex);
        mutex_init(&mvm->d0i3_suspend_mutex);
  
        iwl_mvm_tof_init(mvm);
  
 +      /* init RSS hash key */
 +      get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
 +
        return op_mode;
  
   out_unregister:
@@@ -699,6 -684,8 +699,8 @@@ static void iwl_op_mode_mvm_stop(struc
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
  
+       iwl_free_fw_paging(mvm);
        iwl_mvm_tof_clean(mvm);
  
        ieee80211_free_hw(mvm->hw);
@@@ -1211,7 -1198,7 +1213,7 @@@ static void iwl_mvm_set_wowlan_data(str
        cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
        cmd->offloading_tid = iter_data->offloading_tid;
        cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
 -              ENABLE_DHCP_FILTERING;
 +              ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
        /*
         * The d0i3 uCode takes care of the nonqos counters,
         * so configure only the qos seq ones.
@@@ -1232,7 -1219,8 +1234,7 @@@ int iwl_mvm_enter_d0i3(struct iwl_op_mo
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {
                .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
                                             IWL_WOWLAN_WAKEUP_BEACON_MISS |
 -                                           IWL_WOWLAN_WAKEUP_LINK_CHANGE |
 -                                           IWL_WOWLAN_WAKEUP_BCN_FILTERING),
 +                                           IWL_WOWLAN_WAKEUP_LINK_CHANGE),
        };
        struct iwl_d3_manager_config d3_cfg_cmd = {
                .min_sleep_time = cpu_to_le32(1000),
  
        /* configure wowlan configuration only if needed */
        if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
 +              /* wake on beacons only if beacon storing isn't supported */
 +              if (!fw_has_capa(&mvm->fw->ucode_capa,
 +                               IWL_UCODE_TLV_CAPA_BEACON_STORING))
 +                      wowlan_config_cmd.wakeup_filter |=
 +                              cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
 +
                iwl_mvm_wowlan_config_key_params(mvm,
                                                 d0i3_iter_data.connected_vif,
                                                 true, flags);
index 4fbaadda4e99db25aecec472f4b5c2ff802229e8,a040edc550570a11373ed30b4916b7930c7fa832..dae2c40d605c2b1ce2b6ff47e759200ae9792ba1
@@@ -299,8 -299,6 +299,8 @@@ static void iwl_mvm_set_tx_cmd_crypto(s
  
        case WLAN_CIPHER_SUITE_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 +              pn = atomic64_inc_return(&keyconf->tx_pn);
 +              ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
                ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
                break;
  
@@@ -425,6 -423,15 +425,15 @@@ int iwl_mvm_tx_skb_non_sta(struct iwl_m
                return -1;
        }
  
+       /*
+        * Increase the pending frames counter, so that later, when the reply
+        * comes in and the counter is decremented, we don't go negative.
+        * Note that we don't need to check for aggregation here, since we're
+        * TXing to a non-station.
+        */
+       atomic_inc(&mvm->pending_frames[sta_id]);
        return 0;
  }
  
@@@ -738,37 -745,6 +747,37 @@@ static void iwl_mvm_hwrate_to_tx_status
        iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
  }
  
 +static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
 +                                          u32 status)
 +{
 +      struct iwl_fw_dbg_trigger_tlv *trig;
 +      struct iwl_fw_dbg_trigger_tx_status *status_trig;
 +      int i;
 +
 +      if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
 +              return;
 +
 +      trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
 +      status_trig = (void *)trig->data;
 +
 +      if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
 +              return;
 +
 +      for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
 +              /* don't collect on status 0 */
 +              if (!status_trig->statuses[i].status)
 +                      break;
 +
 +              if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
 +                      continue;
 +
 +              iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 +                                          "Tx status %d was received",
 +                                          status & TX_STATUS_MSK);
 +              break;
 +      }
 +}
 +
  static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
  {
                        break;
                }
  
 +              iwl_mvm_tx_status_check_trigger(mvm, status);
 +
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
diff --combined drivers/of/of_mdio.c
index 669739b302b2c584fc0f3072aed8c9c245157c0c,365dc7e83ab43cd3473d0da57d0ea768784869c8..5e7838290998edb9ec7737b2a6bf2380ebdffbc2
@@@ -211,6 -211,7 +211,6 @@@ static bool of_mdiobus_child_is_phy(str
  int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
  {
        struct device_node *child;
 -      const __be32 *paddr;
        bool scanphys = false;
        int addr, rc;
  
        /* auto scan for PHYs with empty reg property */
        for_each_available_child_of_node(np, child) {
                /* Skip PHYs with reg property set */
 -              paddr = of_get_property(child, "reg", NULL);
 -              if (paddr)
 +              if (of_find_property(child, "reg", NULL))
                        continue;
  
                for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@@ -303,6 -305,7 +303,7 @@@ EXPORT_SYMBOL(of_phy_find_device)
   * @dev: pointer to net_device claiming the phy
   * @phy_np: Pointer to device tree node for the PHY
   * @hndlr: Link state callback for the network device
+  * @flags: flags to pass to the PHY
   * @iface: PHY data interface type
   *
   * If successful, returns a pointer to the phy_device with the embedded
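
For context on the @flags parameter documented above: it is passed through to the PHY at bind time (ending up in phydev->dev_flags). A hedged sketch of a caller; everything except of_phy_connect() itself is a placeholder name, and this is kernel-side code that will not compile standalone:

/* kernel-side sketch, not compilable standalone; my_* names are placeholders */
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void my_adjust_link(struct net_device *ndev)
{
	/* react to link, speed and duplex changes here */
}

static int my_connect_phy(struct net_device *ndev, struct device_node *phy_np)
{
	struct phy_device *phydev;

	/* the fourth argument is the @flags value, stored in phydev->dev_flags */
	phydev = of_phy_connect(ndev, phy_np, my_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	return phydev ? 0 : -ENODEV;
}
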
index 03ffe953036554f01f0559b167e70560f0fcf046,58eef02edc7e81ab2dcd14aace9c722b76a89623..9d91ce39eb0f0e9b43f5927c77a3cd5908a35bc7
@@@ -166,8 -166,6 +166,8 @@@ enum 
        MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
        MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
        MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
 +      MLX5_CMD_OP_SET_WOL_ROL                   = 0x830,
 +      MLX5_CMD_OP_QUERY_WOL_ROL                 = 0x831,
        MLX5_CMD_OP_CREATE_TIR                    = 0x900,
        MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
        MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
@@@ -731,19 -729,7 +731,19 @@@ struct mlx5_ifc_cmd_hca_cap_bits 
  
        u8         reserved_at_1bf[0x3];
        u8         log_max_msg[0x5];
 -      u8         reserved_at_1c7[0x18];
 +      u8         reserved_at_1c7[0x4];
 +      u8         max_tc[0x4];
 +      u8         reserved_at_1cf[0x6];
 +      u8         rol_s[0x1];
 +      u8         rol_g[0x1];
 +      u8         reserved_at_1d7[0x1];
 +      u8         wol_s[0x1];
 +      u8         wol_g[0x1];
 +      u8         wol_a[0x1];
 +      u8         wol_b[0x1];
 +      u8         wol_m[0x1];
 +      u8         wol_u[0x1];
 +      u8         wol_p[0x1];
  
        u8         stat_rate_support[0x10];
        u8         reserved_at_1ef[0xc];
@@@ -4259,7 -4245,9 +4259,9 @@@ struct mlx5_ifc_modify_tir_bitmask_bit
  
        u8         reserved_at_20[0x1b];
        u8         self_lb_en[0x1];
-       u8         reserved_at_3c[0x3];
+       u8         reserved_at_3c[0x1];
+       u8         hash[0x1];
+       u8         reserved_at_3e[0x1];
        u8         lro[0x1];
  };
  
@@@ -6885,54 -6873,6 +6887,54 @@@ struct mlx5_ifc_mtt_bits 
        u8         rd_en[0x1];
  };
  
 +struct mlx5_ifc_query_wol_rol_out_bits {
 +      u8         status[0x8];
 +      u8         reserved_at_8[0x18];
 +
 +      u8         syndrome[0x20];
 +
 +      u8         reserved_at_40[0x10];
 +      u8         rol_mode[0x8];
 +      u8         wol_mode[0x8];
 +
 +      u8         reserved_at_60[0x20];
 +};
 +
 +struct mlx5_ifc_query_wol_rol_in_bits {
 +      u8         opcode[0x10];
 +      u8         reserved_at_10[0x10];
 +
 +      u8         reserved_at_20[0x10];
 +      u8         op_mod[0x10];
 +
 +      u8         reserved_at_40[0x40];
 +};
 +
 +struct mlx5_ifc_set_wol_rol_out_bits {
 +      u8         status[0x8];
 +      u8         reserved_at_8[0x18];
 +
 +      u8         syndrome[0x20];
 +
 +      u8         reserved_at_40[0x40];
 +};
 +
 +struct mlx5_ifc_set_wol_rol_in_bits {
 +      u8         opcode[0x10];
 +      u8         reserved_at_10[0x10];
 +
 +      u8         reserved_at_20[0x10];
 +      u8         op_mod[0x10];
 +
 +      u8         rol_mode_valid[0x1];
 +      u8         wol_mode_valid[0x1];
 +      u8         reserved_at_42[0xe];
 +      u8         rol_mode[0x8];
 +      u8         wol_mode[0x8];
 +
 +      u8         reserved_at_60[0x20];
 +};
 +
  enum {
        MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
        MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
@@@ -7123,49 -7063,4 +7125,49 @@@ struct mlx5_ifc_modify_flow_table_in_bi
        u8         reserved_at_100[0x100];
  };
  
 +struct mlx5_ifc_ets_tcn_config_reg_bits {
 +      u8         g[0x1];
 +      u8         b[0x1];
 +      u8         r[0x1];
 +      u8         reserved_at_3[0x9];
 +      u8         group[0x4];
 +      u8         reserved_at_10[0x9];
 +      u8         bw_allocation[0x7];
 +
 +      u8         reserved_at_20[0xc];
 +      u8         max_bw_units[0x4];
 +      u8         reserved_at_30[0x8];
 +      u8         max_bw_value[0x8];
 +};
 +
 +struct mlx5_ifc_ets_global_config_reg_bits {
 +      u8         reserved_at_0[0x2];
 +      u8         r[0x1];
 +      u8         reserved_at_3[0x1d];
 +
 +      u8         reserved_at_20[0xc];
 +      u8         max_bw_units[0x4];
 +      u8         reserved_at_30[0x8];
 +      u8         max_bw_value[0x8];
 +};
 +
 +struct mlx5_ifc_qetc_reg_bits {
 +      u8                                         reserved_at_0[0x8];
 +      u8                                         port_number[0x8];
 +      u8                                         reserved_at_10[0x30];
 +
 +      struct mlx5_ifc_ets_tcn_config_reg_bits    tc_configuration[0x8];
 +      struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
 +};
 +
 +struct mlx5_ifc_qtct_reg_bits {
 +      u8         reserved_at_0[0x8];
 +      u8         port_number[0x8];
 +      u8         reserved_at_10[0xd];
 +      u8         prio[0x3];
 +
 +      u8         reserved_at_20[0x1d];
 +      u8         tclass[0x3];
 +};
 +
  #endif /* MLX5_IFC_H */
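
The new set_wol_rol layouts above are meant to be filled through mlx5's generated accessors (MLX5_ST_SZ_DW, MLX5_SET). A hedged sketch of a driver-side helper following the usual mlx5 command pattern; the command-exec helper's exact name varies across kernel versions, so treat that call as an assumption:

/* kernel-side sketch, not compilable standalone */
static int set_port_wol(struct mlx5_core_dev *dev, u8 wol_mode)
{
	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};

	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);

	/* assumed exec helper; some trees spell this mlx5_cmd_exec() */
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
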
index 7da3c25999dff5d13c814230da799e4fa98b27a2,f5c5a3fa2c8101cc37ea917d29dec30713ec0699..0967a246745743b6ee9cd0ed69b947a421ec7bc5
@@@ -397,6 -397,7 +397,7 @@@ struct pmu 
   * enum perf_event_active_state - the states of an event
   */
  enum perf_event_active_state {
+       PERF_EVENT_STATE_DEAD           = -4,
        PERF_EVENT_STATE_EXIT           = -3,
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
@@@ -905,7 -906,7 +906,7 @@@ perf_sw_event_sched(u32 event_id, u64 n
        }
  }
  
- extern struct static_key_deferred perf_sched_events;
+ extern struct static_key_false perf_sched_events;
  
  static __always_inline bool
  perf_sw_migrate_enabled(void)
@@@ -924,7 -925,7 +925,7 @@@ static inline void perf_event_task_migr
  static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
  {
-       if (static_key_false(&perf_sched_events.key))
+       if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_in(prev, task);
  
        if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@@ -941,7 -942,7 +942,7 @@@ static inline void perf_event_task_sche
  {
        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
  
-       if (static_key_false(&perf_sched_events.key))
+       if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_out(prev, next);
  }
  
@@@ -964,20 -965,11 +965,20 @@@ DECLARE_PER_CPU(struct perf_callchain_e
  
  extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
  extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 +extern struct perf_callchain_entry *
 +get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 +                 bool crosstask, bool add_mark);
 +extern int get_callchain_buffers(void);
 +extern void put_callchain_buffers(void);
  
 -static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 +static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
  {
 -      if (entry->nr < PERF_MAX_STACK_DEPTH)
 +      if (entry->nr < PERF_MAX_STACK_DEPTH) {
                entry->ip[entry->nr++] = ip;
 +              return 0;
 +      } else {
 +              return -1; /* no more room, stop walking the stack */
 +      }
  }
  
  extern int sysctl_perf_event_paranoid;
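
perf_callchain_store() now tells its caller when the entry buffer is full, so architecture stack walkers can stop early instead of walking frames that would only be discarded. A standalone model of that contract (depth shrunk for the demo):

#include <stdio.h>

#define MAX_STACK_DEPTH 4	/* tiny stand-in for PERF_MAX_STACK_DEPTH */

struct callchain {
	unsigned int nr;
	unsigned long ip[MAX_STACK_DEPTH];
};

static int callchain_store(struct callchain *e, unsigned long ip)
{
	if (e->nr < MAX_STACK_DEPTH) {
		e->ip[e->nr++] = ip;
		return 0;
	}
	return -1;	/* no more room, caller should stop walking */
}

int main(void)
{
	struct callchain e = { 0 };
	unsigned long pc = 0x1000;

	/* stop the walk as soon as the buffer reports it is full */
	while (callchain_store(&e, pc) == 0)
		pc += 8;

	printf("stored %u frames\n", e.nr);
	return 0;
}
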
diff --combined include/linux/skbuff.h
index 797cefb888fb075a9e6c458a0c3d05edfe5d3e35,d3fcd4591ce4ac1312a35710c81a9ff51211c140..15d0df9434667922894936216d17bf88120d60cc
@@@ -1161,6 -1161,10 +1161,6 @@@ static inline void skb_copy_hash(struc
        to->l4_hash = from->l4_hash;
  };
  
 -static inline void skb_sender_cpu_clear(struct sk_buff *skb)
 -{
 -}
 -
  #ifdef NET_SKBUFF_DATA_USES_OFFSET
  static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
  {
@@@ -1981,6 -1985,30 +1981,30 @@@ static inline void skb_reserve(struct s
        skb->tail += len;
  }
  
+ /**
+  *    skb_tailroom_reserve - adjust reserved_tailroom
+  *    @skb: buffer to alter
+  *    @mtu: maximum amount of headlen permitted
+  *    @needed_tailroom: minimum amount of reserved_tailroom
+  *
+  *    Set reserved_tailroom so that headlen can be as large as possible,
+  *    but not larger than @mtu, and so that the tailroom is never smaller
+  *    than @needed_tailroom.
+  *    The required headroom should already have been reserved before using
+  *    this function.
+  */
+ static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
+                                       unsigned int needed_tailroom)
+ {
+       SKB_LINEAR_ASSERT(skb);
+       if (mtu < skb_tailroom(skb) - needed_tailroom)
+               /* use at most mtu */
+               skb->reserved_tailroom = skb_tailroom(skb) - mtu;
+       else
+               /* use up to all available space */
+               skb->reserved_tailroom = needed_tailroom;
+ }
+
  #define ENCAP_TYPE_ETHER      0
  #define ENCAP_TYPE_IPPROTO    1
  
@@@ -2158,11 -2186,6 +2182,11 @@@ static inline int skb_checksum_start_of
        return skb->csum_start - skb_headroom(skb);
  }
  
 +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
 +{
 +      return skb->head + skb->csum_start;
 +}
 +
  static inline int skb_transport_offset(const struct sk_buff *skb)
  {
        return skb_transport_header(skb) - skb->data;
@@@ -2401,10 -2424,6 +2425,10 @@@ static inline struct sk_buff *napi_allo
  {
        return __napi_alloc_skb(napi, length, GFP_ATOMIC);
  }
 +void napi_consume_skb(struct sk_buff *skb, int budget);
 +
 +void __kfree_skb_flush(void);
 +void __kfree_skb_defer(struct sk_buff *skb);
  
  /**
   * __dev_alloc_pages - allocate page for network Rx
@@@ -2627,13 -2646,6 +2651,13 @@@ static inline int skb_clone_writable(co
               skb_headroom(skb) + len <= skb->hdr_len;
  }
  
 +static inline int skb_try_make_writable(struct sk_buff *skb,
 +                                      unsigned int write_len)
 +{
 +      return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
 +             pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 +}
 +
  static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
                            int cloned)
  {
@@@ -3562,7 -3574,6 +3586,7 @@@ static inline struct sec_path *skb_sec_
  struct skb_gso_cb {
        int     mac_offset;
        int     encap_level;
 +      __wsum  csum;
        __u16   csum_start;
  };
  #define SKB_SGO_CB_OFFSET     32
@@@ -3589,16 -3600,6 +3613,16 @@@ static inline int gso_pskb_expand_head(
        return 0;
  }
  
 +static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
 +{
 +      /* Do not update partial checksums if remote checksum is enabled. */
 +      if (skb->remcsum_offload)
 +              return;
 +
 +      SKB_GSO_CB(skb)->csum = res;
 +      SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
 +}
 +
  /* Compute the checksum for a gso segment. First compute the checksum value
   * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
   * then add in skb->csum (checksum from csum_start to end of packet).
   */
  static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
  {
 -      int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
 -                 skb_transport_offset(skb);
 -      __wsum partial;
 +      unsigned char *csum_start = skb_transport_header(skb);
 +      int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
 +      __wsum partial = SKB_GSO_CB(skb)->csum;
  
 -      partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
 -      skb->csum = res;
 -      SKB_GSO_CB(skb)->csum_start -= plen;
 +      SKB_GSO_CB(skb)->csum = res;
 +      SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
  
 -      return csum_fold(partial);
 +      return csum_fold(csum_partial(csum_start, plen, partial));
  }
  
  static inline bool skb_is_gso(const struct sk_buff *skb)
@@@ -3706,30 -3708,5 +3730,30 @@@ static inline unsigned int skb_gso_netw
        return hdr_len + skb_gso_transport_seglen(skb);
  }
  
 +/* Local Checksum Offload.
 + * Compute outer checksum based on the assumption that the
 + * inner checksum will be offloaded later.
 + * See Documentation/networking/checksum-offloads.txt for
 + * explanation of how this works.
 + * Fill in outer checksum adjustment (e.g. with sum of outer
 + * pseudo-header) before calling.
 + * Also ensure that inner checksum is in linear data area.
 + */
 +static inline __wsum lco_csum(struct sk_buff *skb)
 +{
 +      unsigned char *csum_start = skb_checksum_start(skb);
 +      unsigned char *l4_hdr = skb_transport_header(skb);
 +      __wsum partial;
 +
 +      /* Start with complement of inner checksum adjustment */
 +      partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
 +                                                  skb->csum_offset));
 +
 +      /* Add in checksum of our headers (incl. outer checksum
 +       * adjustment filled in by caller) and return result.
 +       */
 +      return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 +}
 +
  #endif        /* __KERNEL__ */
  #endif        /* _LINUX_SKBUFF_H */
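
The LCO comment above leans on a one's-complement identity: once a block's checksum field holds the complement of the sum of everything else it covers, the whole block sums to all-ones, so the inner checksum can be treated as already cancelled out when computing the outer one. A standalone demonstration of that identity over 16-bit words (simplified; the kernel's csum_partial() does the same folding over arbitrary buffers):

#include <stdio.h>
#include <stdint.h>

/* fold a 32-bit accumulator into 16-bit one's-complement form */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum16(const uint16_t *w, int n, uint32_t seed)
{
	uint32_t sum = seed;

	for (int i = 0; i < n; i++)
		sum += w[i];
	return fold(sum);
}

int main(void)
{
	uint16_t blk[4] = { 0x4500, 0x1234, 0x0000, 0xabcd };
	unsigned verify;

	/* store the complement of the sum of the other words */
	blk[2] = (uint16_t)~csum16(blk, 4, 0);

	/* a correctly checksummed block folds to all-ones: verify is 0 */
	verify = (uint16_t)~csum16(blk, 4, 0);
	printf("verify: 0x%04x\n", verify);
	return 0;
}
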
diff --combined include/linux/stmmac.h
index 6e53fa8942a470cb74b9baa2dc2b7d228c23876c,881a79d524675d7411b8985fd4a91110f69fd8e5..4bcf5a61aada05b02d33c33928dc4d7d7e9f0bb2
@@@ -90,21 -90,7 +90,21 @@@ struct stmmac_dma_cfg 
        int pbl;
        int fixed_burst;
        int mixed_burst;
 -      int burst_len;
 +      bool aal;
 +};
 +
 +#define AXI_BLEN      7
 +struct stmmac_axi {
 +      bool axi_lpi_en;
 +      bool axi_xit_frm;
 +      u32 axi_wr_osr_lmt;
 +      u32 axi_rd_osr_lmt;
 +      bool axi_kbbe;
 +      bool axi_axi_all;
 +      u32 axi_blen[AXI_BLEN];
 +      bool axi_fb;
 +      bool axi_mb;
 +      bool axi_rb;
  };
  
  struct plat_stmmacenet_data {
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
        struct device_node *phy_node;
+       struct device_node *mdio_node;
        struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
        void *bsp_priv;
 +      struct stmmac_axi *axi;
  };
  #endif
diff --combined include/uapi/linux/bpf.h
index 6496f98d3d681f5434950a294a1174ac4a275840,5df4881dea7b5e8e42fc1274f967331a7d455600..ee2193287cbe4a3e3e2bcb822b87cda97cf6f2d6
@@@ -81,9 -81,6 +81,9 @@@ enum bpf_map_type 
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
        BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 +      BPF_MAP_TYPE_PERCPU_HASH,
 +      BPF_MAP_TYPE_PERCPU_ARRAY,
 +      BPF_MAP_TYPE_STACK_TRACE,
  };
  
  enum bpf_prog_type {
@@@ -273,31 -270,6 +273,31 @@@ enum bpf_func_id 
         */
        BPF_FUNC_perf_event_output,
        BPF_FUNC_skb_load_bytes,
 +
 +      /**
 +       * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
 +       * @ctx: struct pt_regs*
 +       * @map: pointer to stack_trace map
 +       * @flags: bits 0-7 - number of stack frames to skip
 +       *         bit 8 - collect user stack instead of kernel
 +       *         bit 9 - compare stacks by hash only
 +       *         bit 10 - if two different stacks hash into the same stackid
 +       *                  discard old
 +       *         other bits - reserved
 +       * Return: >= 0 stackid on success or negative error
 +       */
 +      BPF_FUNC_get_stackid,
 +
 +      /**
 +       * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
 +       * @from: raw from buffer
 +       * @from_size: length of from buffer
 +       * @to: raw to buffer
 +       * @to_size: length of to buffer
 +       * @seed: optional seed
 +       * Return: csum result
 +       */
 +      BPF_FUNC_csum_diff,
        __BPF_FUNC_MAX_ID,
  };
  
  
  /* BPF_FUNC_l4_csum_replace flags. */
  #define BPF_F_PSEUDO_HDR              (1ULL << 4)
 +#define BPF_F_MARK_MANGLED_0          (1ULL << 5)
  
  /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
  #define BPF_F_INGRESS                 (1ULL << 0)
  /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
  #define BPF_F_TUNINFO_IPV6            (1ULL << 0)
  
 +/* BPF_FUNC_get_stackid flags. */
 +#define BPF_F_SKIP_FIELD_MASK         0xffULL
 +#define BPF_F_USER_STACK              (1ULL << 8)
 +#define BPF_F_FAST_STACK_CMP          (1ULL << 9)
 +#define BPF_F_REUSE_STACKID           (1ULL << 10)
 +
+ /* BPF_FUNC_skb_set_tunnel_key flags. */
+ #define BPF_F_ZERO_CSUM_TX            (1ULL << 1)
  /* user accessible mirror of in-kernel sk_buff.
   * new fields can only be added to the end of this structure
   */
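
A quick standalone illustration of composing the new bpf_get_stackid() flags defined above (the skip count of 2 is an arbitrary example value):

#include <stdio.h>

#define BPF_F_SKIP_FIELD_MASK	0xffULL
#define BPF_F_USER_STACK	(1ULL << 8)
#define BPF_F_FAST_STACK_CMP	(1ULL << 9)

int main(void)
{
	/* skip two leading frames, take the user stack, hash-only compare */
	unsigned long long flags = (2 & BPF_F_SKIP_FIELD_MASK) |
				   BPF_F_USER_STACK |
				   BPF_F_FAST_STACK_CMP;

	printf("bpf_get_stackid flags = 0x%llx\n", flags);
	return 0;
}
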
diff --combined net/core/filter.c
index 5e2a3b5e5196208b31e9c33fd09c853cba19fafc,bba502f7cd575692a9ed795f83d2fdedf2428ed6..69f4ffc0a282fbc57a37fb453a7123c280a309e2
@@@ -530,14 -530,12 +530,14 @@@ do_pass
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
  
 -              /* RET_K, RET_A are remaped into 2 insns. */
 +              /* RET_K is remapped into 2 insns. RET_A case doesn't need an
 +               * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
 +               */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
 -                      *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
 -                                              BPF_K : BPF_X, BPF_REG_0,
 -                                              BPF_REG_A, fp->k);
 +                      if (BPF_RVAL(fp->code) == BPF_K)
 +                              *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
 +                                                      0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;
  
@@@ -1183,7 -1181,7 +1183,7 @@@ static int __reuseport_attach_prog(stru
        if (bpf_prog_size(prog->len) > sysctl_optmem_max)
                return -ENOMEM;
  
 -      if (sk_unhashed(sk)) {
 +      if (sk_unhashed(sk) && sk->sk_reuseport) {
                err = reuseport_alloc(sk);
                if (err)
                        return err;
@@@ -1335,22 -1333,15 +1335,22 @@@ int sk_reuseport_attach_bpf(u32 ufd, st
        return 0;
  }
  
 -#define BPF_LDST_LEN 16U
 +struct bpf_scratchpad {
 +      union {
 +              __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
 +              u8     buff[MAX_BPF_STACK];
 +      };
 +};
 +
 +static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
  
  static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
  {
 +      struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        int offset = (int) r2;
        void *from = (void *) (long) r3;
        unsigned int len = (unsigned int) r4;
 -      char buf[BPF_LDST_LEN];
        void *ptr;
  
        if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
         *
         * so check for invalid 'offset' and too large 'len'
         */
 -      if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
 +      if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
 -
 -      if (unlikely(skb_cloned(skb) &&
 -                   !skb_clone_writable(skb, offset + len)))
 +      if (unlikely(skb_try_make_writable(skb, offset + len)))
                return -EFAULT;
  
 -      ptr = skb_header_pointer(skb, offset, len, buf);
 +      ptr = skb_header_pointer(skb, offset, len, sp->buff);
        if (unlikely(!ptr))
                return -EFAULT;
  
  
        memcpy(ptr, from, len);
  
 -      if (ptr == buf)
 +      if (ptr == sp->buff)
                /* skb_store_bits cannot return -EFAULT here */
                skb_store_bits(skb, offset, ptr, len);
  
@@@ -1407,7 -1400,7 +1407,7 @@@ static u64 bpf_skb_load_bytes(u64 r1, u
        unsigned int len = (unsigned int) r4;
        void *ptr;
  
 -      if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
 +      if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
                return -EFAULT;
  
        ptr = skb_header_pointer(skb, offset, len, to);
@@@ -1439,7 -1432,9 +1439,7 @@@ static u64 bpf_l3_csum_replace(u64 r1, 
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
 -
 -      if (unlikely(skb_cloned(skb) &&
 -                   !skb_clone_writable(skb, offset + sizeof(sum))))
 +      if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
  
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@@ -1479,31 -1474,23 +1479,31 @@@ static u64 bpf_l4_csum_replace(u64 r1, 
  {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 +      bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
        int offset = (int) r2;
        __sum16 sum, *ptr;
  
 -      if (unlikely(flags & ~(BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
 +      if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
 +                             BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
 -
 -      if (unlikely(skb_cloned(skb) &&
 -                   !skb_clone_writable(skb, offset + sizeof(sum))))
 +      if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
  
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
        if (unlikely(!ptr))
                return -EFAULT;
 +      if (is_mmzero && !*ptr)
 +              return 0;
  
        switch (flags & BPF_F_HDR_FIELD_MASK) {
 +      case 0:
 +              if (unlikely(from != 0))
 +                      return -EINVAL;
 +
 +              inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
 +              break;
        case 2:
                inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
                break;
                return -EINVAL;
        }
  
 +      if (is_mmzero && !*ptr)
 +              *ptr = CSUM_MANGLED_0;
        if (ptr == &sum)
                /* skb_store_bits guaranteed to not return -EFAULT here */
                skb_store_bits(skb, offset, ptr, sizeof(sum));
@@@ -1534,45 -1519,6 +1534,45 @@@ const struct bpf_func_proto bpf_l4_csum
        .arg5_type      = ARG_ANYTHING,
  };
  
 +static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
 +{
 +      struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 +      u64 diff_size = from_size + to_size;
 +      __be32 *from = (__be32 *) (long) r1;
 +      __be32 *to   = (__be32 *) (long) r3;
 +      int i, j = 0;
 +
 +      /* This is quite flexible; some examples:
 +       *
 +       * from_size == 0, to_size > 0,  seed := csum --> pushing data
 +       * from_size > 0,  to_size == 0, seed := csum --> pulling data
 +       * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
 +       *
 +       * Even for diffing, from_size and to_size don't need to be equal.
 +       */
 +      if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
 +                   diff_size > sizeof(sp->diff)))
 +              return -EINVAL;
 +
 +      for (i = 0; i < from_size / sizeof(__be32); i++, j++)
 +              sp->diff[j] = ~from[i];
 +      for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
 +              sp->diff[j] = to[i];
 +
 +      return csum_partial(sp->diff, diff_size, seed);
 +}
 +
 +const struct bpf_func_proto bpf_csum_diff_proto = {
 +      .func           = bpf_csum_diff,
 +      .gpl_only       = false,
 +      .ret_type       = RET_INTEGER,
 +      .arg1_type      = ARG_PTR_TO_STACK,
 +      .arg2_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
 +      .arg3_type      = ARG_PTR_TO_STACK,
 +      .arg4_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
 +      .arg5_type      = ARG_ANYTHING,
 +};
 +
  static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
  {
        struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
        }
  
        skb2->dev = dev;
 -      skb_sender_cpu_clear(skb2);
        return dev_queue_xmit(skb2);
  }
  
@@@ -1649,6 -1596,7 +1649,6 @@@ int skb_do_redirect(struct sk_buff *skb
        }
  
        skb->dev = dev;
 -      skb_sender_cpu_clear(skb);
        return dev_queue_xmit(skb);
  }
  
@@@ -1734,13 -1682,6 +1734,13 @@@ bool bpf_helper_changes_skb_data(void *
                return true;
        if (func == bpf_skb_vlan_pop)
                return true;
 +      if (func == bpf_skb_store_bytes)
 +              return true;
 +      if (func == bpf_l3_csum_replace)
 +              return true;
 +      if (func == bpf_l4_csum_replace)
 +              return true;
 +
        return false;
  }
  
@@@ -1811,7 -1752,7 +1811,7 @@@ static u64 bpf_skb_set_tunnel_key(u64 r
        u8 compat[sizeof(struct bpf_tunnel_key)];
        struct ip_tunnel_info *info;
  
-       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
+       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX)))
                return -EINVAL;
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
        info = &md->u.tun_info;
        info->mode = IP_TUNNEL_INFO_TX;
  
-       info->key.tun_flags = TUNNEL_KEY;
+       info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM;
        info->key.tun_id = cpu_to_be64(from->tunnel_id);
        info->key.tos = from->tunnel_tos;
        info->key.ttl = from->tunnel_ttl;
                       sizeof(from->remote_ipv6));
        } else {
                info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+               if (flags & BPF_F_ZERO_CSUM_TX)
+                       info->key.tun_flags &= ~TUNNEL_CSUM;
        }
  
        return 0;
@@@ -1908,8 -1851,6 +1910,8 @@@ tc_cls_act_func_proto(enum bpf_func_id 
                return &bpf_skb_store_bytes_proto;
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
 +      case BPF_FUNC_csum_diff:
 +              return &bpf_csum_diff_proto;
        case BPF_FUNC_l3_csum_replace:
                return &bpf_l3_csum_replace_proto;
        case BPF_FUNC_l4_csum_replace:
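
bpf_csum_diff()'s push/pull/diff flexibility, described in its comment above, is plain one's-complement arithmetic: removing data from a checksum is the same as adding its complement. A simplified standalone model over 32-bit words (the kernel folds to 16 bits via csum_partial(); the identity is the same):

#include <stdio.h>
#include <stdint.h>

static uint32_t fold32(uint64_t sum)
{
	while (sum >> 32)
		sum = (sum & 0xffffffffULL) + (sum >> 32);
	return (uint32_t)sum;
}

/* checksum of (~from words, then to words), seeded with 'seed' */
static uint32_t csum_diff(const uint32_t *from, int fn,
			  const uint32_t *to, int tn, uint32_t seed)
{
	uint64_t sum = seed;

	for (int i = 0; i < fn; i++)
		sum += (uint32_t)~from[i];	/* removing == adding complement */
	for (int i = 0; i < tn; i++)
		sum += to[i];
	return fold32(sum);
}

int main(void)
{
	uint32_t old_word = 0x0a000001, new_word = 0x0a000002;

	/* pure diff: old data out, new data in, no seed */
	printf("diff = 0x%08x\n", (unsigned)csum_diff(&old_word, 1,
						      &new_word, 1, 0));
	return 0;
}
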
diff --combined net/core/rtnetlink.c
index 6128aac01b11248b69835114604c06a299b0eac0,8261d95dd846647798c7dba8f1dbfa4cf61a0eef..d2d9e5ebf58ea827f8e0b5aaa85cea23cd3b77dd
@@@ -804,8 -804,6 +804,8 @@@ static void copy_rtnl_link_stats(struc
  
        a->rx_compressed = b->rx_compressed;
        a->tx_compressed = b->tx_compressed;
 +
 +      a->rx_nohandler = b->rx_nohandler;
  }
  
  static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
@@@ -1391,6 -1389,15 +1391,6 @@@ static const struct nla_policy ifla_vf_
        [IFLA_VF_TRUST]         = { .len = sizeof(struct ifla_vf_trust) },
  };
  
 -static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
 -      [IFLA_VF_STATS_RX_PACKETS]      = { .type = NLA_U64 },
 -      [IFLA_VF_STATS_TX_PACKETS]      = { .type = NLA_U64 },
 -      [IFLA_VF_STATS_RX_BYTES]        = { .type = NLA_U64 },
 -      [IFLA_VF_STATS_TX_BYTES]        = { .type = NLA_U64 },
 -      [IFLA_VF_STATS_BROADCAST]       = { .type = NLA_U64 },
 -      [IFLA_VF_STATS_MULTICAST]       = { .type = NLA_U64 },
 -};
 -
  static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
        [IFLA_PORT_VF]          = { .type = NLA_U32 },
        [IFLA_PORT_PROFILE]     = { .type = NLA_STRING,
        [IFLA_PORT_RESPONSE]    = { .type = NLA_U16, },
  };
  
 +static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
 +{
 +      const struct rtnl_link_ops *ops = NULL;
 +      struct nlattr *linfo[IFLA_INFO_MAX + 1];
 +
 +      if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0)
 +              return NULL;
 +
 +      if (linfo[IFLA_INFO_KIND]) {
 +              char kind[MODULE_NAME_LEN];
 +
 +              nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
 +              ops = rtnl_link_ops_get(kind);
 +      }
 +
 +      return ops;
 +}
 +
 +static bool link_master_filtered(struct net_device *dev, int master_idx)
 +{
 +      struct net_device *master;
 +
 +      if (!master_idx)
 +              return false;
 +
 +      master = netdev_master_upper_dev_get(dev);
 +      if (!master || master->ifindex != master_idx)
 +              return true;
 +
 +      return false;
 +}
 +
 +static bool link_kind_filtered(const struct net_device *dev,
 +                             const struct rtnl_link_ops *kind_ops)
 +{
 +      if (kind_ops && dev->rtnl_link_ops != kind_ops)
 +              return true;
 +
 +      return false;
 +}
 +
 +static bool link_dump_filtered(struct net_device *dev,
 +                             int master_idx,
 +                             const struct rtnl_link_ops *kind_ops)
 +{
 +      if (link_master_filtered(dev, master_idx) ||
 +          link_kind_filtered(dev, kind_ops))
 +              return true;
 +
 +      return false;
 +}
 +
  static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
  {
        struct net *net = sock_net(skb->sk);
        struct hlist_head *head;
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
 +      const struct rtnl_link_ops *kind_ops = NULL;
 +      unsigned int flags = NLM_F_MULTI;
 +      int master_idx = 0;
        int err;
        int hdrlen;
  
  
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
 +
 +              if (tb[IFLA_MASTER])
 +                      master_idx = nla_get_u32(tb[IFLA_MASTER]);
 +
 +              if (tb[IFLA_LINKINFO])
 +                      kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
 +
 +              if (master_idx || kind_ops)
 +                      flags |= NLM_F_DUMP_FILTERED;
        }
  
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
                hlist_for_each_entry(dev, head, index_hlist) {
 +                      if (link_dump_filtered(dev, master_idx, kind_ops))
 +                              continue;
                        if (idx < s_idx)
                                goto cont;
                        err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
                                               NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, 0,
 -                                             NLM_F_MULTI,
 +                                             flags,
                                               ext_filter_mask);
                        /* If we ran out of room on the first message,
                         * we're in trouble
@@@ -2970,6 -2911,7 +2970,7 @@@ int ndo_dflt_fdb_dump(struct sk_buff *s
        nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
  out:
        netif_addr_unlock_bh(dev);
+       cb->args[1] = err;
        return idx;
  }
  EXPORT_SYMBOL(ndo_dflt_fdb_dump);
@@@ -3003,6 -2945,7 +3004,7 @@@ static int rtnl_fdb_dump(struct sk_buf
                ops = br_dev->netdev_ops;
        }
  
+       cb->args[1] = 0;
        for_each_netdev(net, dev) {
                if (brport_idx && (dev->ifindex != brport_idx))
                        continue;
                                idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
                                                         idx);
                }
+               if (cb->args[1] == -EMSGSIZE)
+                       break;
  
                if (dev->netdev_ops->ndo_fdb_dump)
                        idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
                                                            idx);
                else
                        idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+               if (cb->args[1] == -EMSGSIZE)
+                       break;
  
                cops = NULL;
        }
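
The dump filtering added above boils down to two cheap per-device predicates evaluated before any netlink message is built, which is what keeps master- or kind-filtered dumps cheap on hosts with many interfaces. The predicate structure, modeled standalone with trimmed-down stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct link_ops { const char *kind; };

struct netdev {			/* trimmed stand-in for net_device */
	int ifindex;
	int master_ifindex;	/* 0 when the device has no master */
	const struct link_ops *ops;
};

static bool link_dump_filtered(const struct netdev *dev, int master_idx,
			       const struct link_ops *kind_ops)
{
	if (master_idx && dev->master_ifindex != master_idx)
		return true;	/* wrong (or no) master: skip */
	if (kind_ops && dev->ops != kind_ops)
		return true;	/* wrong link kind: skip */
	return false;
}

int main(void)
{
	const struct link_ops vlan = { "vlan" };
	struct netdev dev = { .ifindex = 3, .master_ifindex = 2, .ops = &vlan };

	printf("filtered: %d\n", link_dump_filtered(&dev, 2, &vlan));
	return 0;
}
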
diff --combined net/core/skbuff.c
index 7af7ec635d901b5e2c41058b5fac035c7e4821a7,8616d1147c938808da752734b6cecea260b149b9..9d7be61e5e6b01054aed1ecd3b53f542792a1aac
@@@ -349,16 -349,8 +349,16 @@@ struct sk_buff *build_skb(void *data, u
  }
  EXPORT_SYMBOL(build_skb);
  
 +#define NAPI_SKB_CACHE_SIZE   64
 +
 +struct napi_alloc_cache {
 +      struct page_frag_cache page;
 +      size_t skb_count;
 +      void *skb_cache[NAPI_SKB_CACHE_SIZE];
 +};
 +
  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 -static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
 +static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
  
  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
@@@ -388,9 -380,9 +388,9 @@@ EXPORT_SYMBOL(netdev_alloc_frag)
  
  static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
 -      struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 +      struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
  
 -      return __alloc_page_frag(nc, fragsz, gfp_mask);
 +      return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
  }
  
  void *napi_alloc_frag(unsigned int fragsz)
@@@ -484,7 -476,7 +484,7 @@@ EXPORT_SYMBOL(__netdev_alloc_skb)
  struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
  {
 -      struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 +      struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
        void *data;
  
        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;
  
 -      data = __alloc_page_frag(nc, len, gfp_mask);
 +      data = __alloc_page_frag(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;
  
        }
  
        /* use OR instead of assignment to avoid clearing of bits in mask */
 -      if (nc->pfmemalloc)
 +      if (nc->page.pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;
  
@@@ -757,73 -749,6 +757,73 @@@ void consume_skb(struct sk_buff *skb
  }
  EXPORT_SYMBOL(consume_skb);
  
 +void __kfree_skb_flush(void)
 +{
 +      struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 +
 +      /* flush skb_cache if it contains any objects */
 +      if (nc->skb_count) {
 +              kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 +                                   nc->skb_cache);
 +              nc->skb_count = 0;
 +      }
 +}
 +
 +static inline void _kfree_skb_defer(struct sk_buff *skb)
 +{
 +      struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 +
 +      /* drop skb->head and call any destructors for packet */
 +      skb_release_all(skb);
 +
 +      /* record skb to CPU local list */
 +      nc->skb_cache[nc->skb_count++] = skb;
 +
 +#ifdef CONFIG_SLUB
 +      /* SLUB writes into objects when freeing */
 +      prefetchw(skb);
 +#endif
 +
 +      /* flush skb_cache if it is filled */
 +      if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 +              kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
 +                                   nc->skb_cache);
 +              nc->skb_count = 0;
 +      }
 +}
 +void __kfree_skb_defer(struct sk_buff *skb)
 +{
 +      _kfree_skb_defer(skb);
 +}
 +
 +void napi_consume_skb(struct sk_buff *skb, int budget)
 +{
 +      if (unlikely(!skb))
 +              return;
 +
 +      /* if budget is 0 assume netpoll w/ IRQs disabled */
 +      if (unlikely(!budget)) {
 +              dev_consume_skb_irq(skb);
 +              return;
 +      }
 +
 +      if (likely(atomic_read(&skb->users) == 1))
 +              smp_rmb();
 +      else if (likely(!atomic_dec_and_test(&skb->users)))
 +              return;
 +      /* if we reach here, the SKB is ready to be freed */
 +      trace_consume_skb(skb);
 +
 +      /* if the SKB is a clone, fall back to the regular free path */
 +      if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
 +              __kfree_skb(skb);
 +              return;
 +      }
 +
 +      _kfree_skb_defer(skb);
 +}
 +EXPORT_SYMBOL(napi_consume_skb);
 +
  /* Make sure a field is enclosed inside headers_start/headers_end section */
  #define CHECK_SKB_FIELD(field) \
        BUILD_BUG_ON(offsetof(struct sk_buff, field) <          \
@@@ -3022,6 -2947,24 +3022,24 @@@ int skb_append_pagefrags(struct sk_buf
  }
  EXPORT_SYMBOL_GPL(skb_append_pagefrags);
  
+ /**
+  *    skb_push_rcsum - push skb and update receive checksum
+  *    @skb: buffer to update
+  *    @len: length of data pushed
+  *
+  *    This function performs an skb_push on the packet and updates
+  *    the CHECKSUM_COMPLETE checksum.  It should be used on
+  *    receive path processing instead of skb_push unless you know
+  *    that the checksum difference is zero (e.g., a valid IP header)
+  *    or you are setting ip_summed to CHECKSUM_NONE.
+  */
+ static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
+ {
+       skb_push(skb, len);
+       skb_postpush_rcsum(skb, skb->data, len);
+       return skb->data;
+ }
+
  /**
   *    skb_pull_rcsum - pull skb and update receive checksum
   *    @skb: buffer to update
@@@ -3081,7 -3024,8 +3099,7 @@@ struct sk_buff *skb_segment(struct sk_b
        if (unlikely(!proto))
                return ERR_PTR(-EINVAL);
  
 -      csum = !head_skb->encap_hdr_csum &&
 -          !!can_checksum_protocol(features, proto);
 +      csum = !!can_checksum_protocol(features, proto);
  
        headroom = skb_headroom(head_skb);
        pos = skb_headlen(head_skb);
                if (nskb->len == len + doffset)
                        goto perform_csum_check;
  
 -              if (!sg && !nskb->remcsum_offload) {
 -                      nskb->ip_summed = CHECKSUM_NONE;
 -                      nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 -                                                          skb_put(nskb, len),
 -                                                          len, 0);
 +              if (!sg) {
 +                      if (!nskb->remcsum_offload)
 +                              nskb->ip_summed = CHECKSUM_NONE;
 +                      SKB_GSO_CB(nskb)->csum =
 +                              skb_copy_and_csum_bits(head_skb, offset,
 +                                                     skb_put(nskb, len),
 +                                                     len, 0);
                        SKB_GSO_CB(nskb)->csum_start =
 -                          skb_headroom(nskb) + doffset;
 +                              skb_headroom(nskb) + doffset;
                        continue;
                }
  
@@@ -3248,19 -3190,12 +3266,19 @@@ skip_fraglist
                nskb->truesize += nskb->data_len;
  
  perform_csum_check:
 -              if (!csum && !nskb->remcsum_offload) {
 -                      nskb->csum = skb_checksum(nskb, doffset,
 -                                                nskb->len - doffset, 0);
 -                      nskb->ip_summed = CHECKSUM_NONE;
 +              if (!csum) {
 +                      if (skb_has_shared_frag(nskb)) {
 +                              err = __skb_linearize(nskb);
 +                              if (err)
 +                                      goto err;
 +                      }
 +                      if (!nskb->remcsum_offload)
 +                              nskb->ip_summed = CHECKSUM_NONE;
 +                      SKB_GSO_CB(nskb)->csum =
 +                              skb_checksum(nskb, doffset,
 +                                           nskb->len - doffset, 0);
                        SKB_GSO_CB(nskb)->csum_start =
 -                          skb_headroom(nskb) + doffset;
 +                              skb_headroom(nskb) + doffset;
                }
        } while ((offset += len) < head_skb->len);
  
@@@ -4167,9 -4102,9 +4185,9 @@@ struct sk_buff *skb_checksum_trimmed(st
        if (!pskb_may_pull(skb_chk, offset))
                goto err;
  
-       __skb_pull(skb_chk, offset);
+       skb_pull_rcsum(skb_chk, offset);
        ret = skb_chkf(skb_chk);
-       __skb_push(skb_chk, offset);
+       skb_push_rcsum(skb_chk, offset);
  
        if (ret)
                goto err;
@@@ -4302,6 -4237,7 +4320,6 @@@ void skb_scrub_packet(struct sk_buff *s
        skb->skb_iif = 0;
        skb->ignore_df = 0;
        skb_dst_drop(skb);
 -      skb_sender_cpu_clear(skb);
        secpath_reset(skb);
        nf_reset(skb);
        nf_reset_trace(skb);
@@@ -4497,7 -4433,9 +4515,7 @@@ int skb_vlan_push(struct sk_buff *skb, 
                skb->mac_len += VLAN_HLEN;
                __skb_pull(skb, offset);
  
 -              if (skb->ip_summed == CHECKSUM_COMPLETE)
 -                      skb->csum = csum_add(skb->csum, csum_partial(skb->data
 -                                      + (2 * ETH_ALEN), VLAN_HLEN, 0));
 +              skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
        }
        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
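
napi_consume_skb() above lets a driver's TX-completion path feed skbs into the per-CPU bulk-free cache when it runs in NAPI context, while degrading to an IRQ-safe free when budget is zero (netpoll). A hedged sketch of the intended call site; the ring type and the two my_* helpers are placeholders, and this is kernel-side code that will not compile standalone:

/* kernel-side sketch, not compilable standalone; my_* names are placeholders */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	struct sk_buff *skb;
	int done = 0;

	/* reclaim completed TX skbs into the per-CPU bulk-free cache */
	while ((skb = my_ring_next_completed_tx(ring)) != NULL)
		napi_consume_skb(skb, budget);	/* budget == 0 => IRQ-safe free */

	/* ... RX processing up to 'budget' packets, counted in 'done' ... */

	if (done < budget)
		napi_complete(napi);
	return done;
}
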
diff --combined net/ipv4/igmp.c
index 2aea9f1a2a31bfe624d5bb1c56effe70e97fa88a,b3086cf2702759d1077cb6a014afbd130d8206db..9b4ca87f70bab4e0174b804fac30f949b27d7457
  #include <linux/seq_file.h>
  #endif
  
 -#define IP_MAX_MEMBERSHIPS    20
 -#define IP_MAX_MSF            10
 -
 -/* IGMP reports for link-local multicast groups are enabled by default */
 -int sysctl_igmp_llm_reports __read_mostly = 1;
 -
  #ifdef CONFIG_IP_MULTICAST
  /* Parameter names and values are taken from igmp-v2-06 draft */
  
@@@ -350,9 -356,8 +350,8 @@@ static struct sk_buff *igmpv3_newpack(s
        skb_dst_set(skb, &rt->dst);
        skb->dev = dev;
  
-       skb->reserved_tailroom = skb_end_offset(skb) -
-                                min(mtu, skb_end_offset(skb));
        skb_reserve(skb, hlen);
+       skb_tailroom_reserve(skb, mtu, tlen);
  
        skb_reset_network_header(skb);
        pip = ip_hdr(skb);
@@@ -427,7 -432,6 +426,7 @@@ static struct sk_buff *add_grec(struct 
        int type, int gdeleted, int sdeleted)
  {
        struct net_device *dev = pmc->interface->dev;
 +      struct net *net = dev_net(dev);
        struct igmpv3_report *pih;
        struct igmpv3_grec *pgr = NULL;
        struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
  
        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                return skb;
 -      if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
 +      if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
                return skb;
  
        isquery = type == IGMPV3_MODE_IS_INCLUDE ||
@@@ -538,7 -542,6 +537,7 @@@ empty_source
  static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
  {
        struct sk_buff *skb = NULL;
 +      struct net *net = dev_net(in_dev->dev);
        int type;
  
        if (!pmc) {
                        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                                continue;
                        if (ipv4_is_local_multicast(pmc->multiaddr) &&
 -                           !sysctl_igmp_llm_reports)
 +                           !net->ipv4.sysctl_igmp_llm_reports)
                                continue;
                        spin_lock_bh(&pmc->lock);
                        if (pmc->sfcount[MCAST_EXCLUDE])
@@@ -683,7 -686,7 +682,7 @@@ static int igmp_send_report(struct in_d
        if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
                return igmpv3_send_report(in_dev, pmc);
  
 -      if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
 +      if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
                return 0;
  
        if (type == IGMP_HOST_LEAVE_MESSAGE)
@@@ -762,10 -765,9 +761,10 @@@ static void igmp_ifc_timer_expire(unsig
  
  static void igmp_ifc_event(struct in_device *in_dev)
  {
 +      struct net *net = dev_net(in_dev->dev);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                return;
 -      in_dev->mr_ifc_count = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +      in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
        igmp_ifc_start_timer(in_dev, 1);
  }
  
@@@ -855,13 -857,12 +854,13 @@@ static int igmp_marksources(struct ip_m
  static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
  {
        struct ip_mc_list *im;
 +      struct net *net = dev_net(in_dev->dev);
  
        /* Timers are only set for non-local groups */
  
        if (group == IGMP_ALL_HOSTS)
                return false;
 -      if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
 +      if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
                return false;
  
        rcu_read_lock();
@@@ -885,7 -886,6 +884,7 @@@ static bool igmp_heard_query(struct in_
        __be32                  group = ih->group;
        int                     max_delay;
        int                     mark = 0;
 +      struct net              *net = dev_net(in_dev->dev);
  
  
        if (len == 8) {
                if (im->multiaddr == IGMP_ALL_HOSTS)
                        continue;
                if (ipv4_is_local_multicast(im->multiaddr) &&
 -                  !sysctl_igmp_llm_reports)
 +                  !net->ipv4.sysctl_igmp_llm_reports)
                        continue;
                spin_lock_bh(&im->lock);
                if (im->tm_running)
@@@ -1087,7 -1087,6 +1086,7 @@@ static void ip_mc_filter_del(struct in_
  static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
  {
        struct ip_mc_list *pmc;
 +      struct net *net = dev_net(in_dev->dev);
  
        /* this is an "ip_mc_list" for convenience; only the fields below
         * are actually used. In particular, the refcnt and users are not
        pmc->interface = im->interface;
        in_dev_hold(in_dev);
        pmc->multiaddr = im->multiaddr;
 -      pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +      pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
        pmc->sfmode = im->sfmode;
        if (pmc->sfmode == MCAST_INCLUDE) {
                struct ip_sf_list *psf;
@@@ -1187,7 -1186,6 +1186,7 @@@ static void igmp_group_dropped(struct i
  {
        struct in_device *in_dev = im->interface;
  #ifdef CONFIG_IP_MULTICAST
 +      struct net *net = dev_net(in_dev->dev);
        int reporter;
  #endif
  
  #ifdef CONFIG_IP_MULTICAST
        if (im->multiaddr == IGMP_ALL_HOSTS)
                return;
 -      if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
 +      if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
                return;
  
        reporter = im->reporter;
  static void igmp_group_added(struct ip_mc_list *im)
  {
        struct in_device *in_dev = im->interface;
 +#ifdef CONFIG_IP_MULTICAST
 +      struct net *net = dev_net(in_dev->dev);
 +#endif
  
        if (im->loaded == 0) {
                im->loaded = 1;
  #ifdef CONFIG_IP_MULTICAST
        if (im->multiaddr == IGMP_ALL_HOSTS)
                return;
 -      if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
 +      if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
                return;
  
        if (in_dev->dead)
        }
        /* else, v3 */
  
 -      im->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +      im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
        igmp_ifc_event(in_dev);
  #endif
  }
@@@ -1318,9 -1313,6 +1317,9 @@@ static void ip_mc_hash_remove(struct in
  void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
  {
        struct ip_mc_list *im;
 +#ifdef CONFIG_IP_MULTICAST
 +      struct net *net = dev_net(in_dev->dev);
 +#endif
  
        ASSERT_RTNL();
  
        spin_lock_init(&im->lock);
  #ifdef CONFIG_IP_MULTICAST
        setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
 -      im->unsolicit_count = sysctl_igmp_qrv;
 +      im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
  #endif
  
        im->next_rcu = in_dev->mc_list;
@@@ -1540,7 -1532,6 +1539,7 @@@ static void ip_mc_rejoin_groups(struct 
  #ifdef CONFIG_IP_MULTICAST
        struct ip_mc_list *im;
        int type;
 +      struct net *net = dev_net(in_dev->dev);
  
        ASSERT_RTNL();
  
                if (im->multiaddr == IGMP_ALL_HOSTS)
                        continue;
                if (ipv4_is_local_multicast(im->multiaddr) &&
 -                  !sysctl_igmp_llm_reports)
 +                  !net->ipv4.sysctl_igmp_llm_reports)
                        continue;
  
                /* a failover is happening and switches
@@@ -1647,9 -1638,6 +1646,9 @@@ void ip_mc_down(struct in_device *in_de
  
  void ip_mc_init_dev(struct in_device *in_dev)
  {
 +#ifdef CONFIG_IP_MULTICAST
 +      struct net *net = dev_net(in_dev->dev);
 +#endif
        ASSERT_RTNL();
  
  #ifdef CONFIG_IP_MULTICAST
                        (unsigned long)in_dev);
        setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
                        (unsigned long)in_dev);
 -      in_dev->mr_qrv = sysctl_igmp_qrv;
 +      in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
  #endif
  
        spin_lock_init(&in_dev->mc_tomb_lock);
  void ip_mc_up(struct in_device *in_dev)
  {
        struct ip_mc_list *pmc;
 +#ifdef CONFIG_IP_MULTICAST
 +      struct net *net = dev_net(in_dev->dev);
 +#endif
  
        ASSERT_RTNL();
  
  #ifdef CONFIG_IP_MULTICAST
 -      in_dev->mr_qrv = sysctl_igmp_qrv;
 +      in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
  #endif
        ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
  
@@@ -1741,6 -1726,11 +1740,6 @@@ static struct in_device *ip_mc_find_dev
  /*
   *    Join a socket to a group
   */
 -int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
 -int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
 -#ifdef CONFIG_IP_MULTICAST
 -int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE;
 -#endif
  
  static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
        __be32 *psfsrc)
        if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
  #ifdef CONFIG_IP_MULTICAST
                struct in_device *in_dev = pmc->interface;
 +              struct net *net = dev_net(in_dev->dev);
  #endif
  
                /* no more filters for this source */
  #ifdef CONFIG_IP_MULTICAST
                if (psf->sf_oldin &&
                    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
 -                      psf->sf_crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +                      psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                        psf->sf_next = pmc->tomb;
                        pmc->tomb = psf;
                        rv = 1;
@@@ -1834,13 -1823,12 +1833,13 @@@ static int ip_mc_del_src(struct in_devi
            pmc->sfcount[MCAST_INCLUDE]) {
  #ifdef CONFIG_IP_MULTICAST
                struct ip_sf_list *psf;
 +              struct net *net = dev_net(in_dev->dev);
  #endif
  
                /* filter mode change */
                pmc->sfmode = MCAST_INCLUDE;
  #ifdef CONFIG_IP_MULTICAST
 -              pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +              pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                in_dev->mr_ifc_count = pmc->crcount;
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
@@@ -2007,7 -1995,6 +2006,7 @@@ static int ip_mc_add_src(struct in_devi
        } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
  #ifdef CONFIG_IP_MULTICAST
                struct ip_sf_list *psf;
 +              struct net *net = dev_net(pmc->interface->dev);
                in_dev = pmc->interface;
  #endif
  
  #ifdef CONFIG_IP_MULTICAST
                /* else no filters; keep old mode for reports */
  
 -              pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
 +              pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                in_dev->mr_ifc_count = pmc->crcount;
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
@@@ -2086,7 -2073,7 +2085,7 @@@ int ip_mc_join_group(struct sock *sk, s
                count++;
        }
        err = -ENOBUFS;
 -      if (count >= sysctl_igmp_max_memberships)
 +      if (count >= net->ipv4.sysctl_igmp_max_memberships)
                goto done;
        iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
        if (!iml)
@@@ -2258,7 -2245,7 +2257,7 @@@ int ip_mc_source(int add, int omode, st
        }
        /* else, add a new source to the filter */
  
 -      if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
 +      if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) {
                err = -ENOBUFS;
                goto done;
        }
@@@ -2931,12 -2918,6 +2930,12 @@@ static int __net_init igmp_net_init(str
                goto out_sock;
        }
  
 +      /* Sysctl initialization */
 +      net->ipv4.sysctl_igmp_max_memberships = 20;
 +      net->ipv4.sysctl_igmp_max_msf = 10;
 +      /* IGMP reports for link-local multicast groups are enabled by default */
 +      net->ipv4.sysctl_igmp_llm_reports = 1;
 +      net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
  
  out_sock:
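
The igmp.c conversion above moves the IGMP knobs from file-scope globals (sysctl_igmp_llm_reports, sysctl_igmp_qrv, sysctl_igmp_max_memberships, sysctl_igmp_max_msf) into per-network-namespace fields under net->ipv4, initialized in igmp_net_init() with the defaults the old globals used: 20 memberships, 10 source filters, link-local reports enabled, qrv of 2. A compilable sketch of the pattern, with struct net pared down to the relevant fields:

    #include <stdio.h>

    /* Pared-down stand-in for the kernel's struct net / netns_ipv4 */
    struct netns_ipv4 {
        int sysctl_igmp_max_memberships;
        int sysctl_igmp_max_msf;
        int sysctl_igmp_llm_reports;
        int sysctl_igmp_qrv;
    };

    struct net {
        struct netns_ipv4 ipv4;
    };

    /* Mirrors the initialization added to igmp_net_init() above */
    static int igmp_net_init_sketch(struct net *net)
    {
        net->ipv4.sysctl_igmp_max_memberships = 20;
        net->ipv4.sysctl_igmp_max_msf = 10;
        /* IGMP reports for link-local multicast groups on by default */
        net->ipv4.sysctl_igmp_llm_reports = 1;
        net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
    }

    int main(void)
    {
        struct net init_net;

        igmp_net_init_sketch(&init_net);
        /* each namespace now carries its own copy of the knobs */
        printf("qrv=%d\n", init_net.ipv4.sysctl_igmp_qrv);
        return 0;
    }

Every former global read in the hunks above becomes a dev_net()/sock_net() lookup followed by a field access, which is exactly the shape of the new net->ipv4.sysctl_* lines.
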
diff --combined net/ipv4/ip_output.c
index f734c42acdaf960e7bfdbae050fb64f4108b66d6,565bf64b2b7d6047a29e69df00fb3b85ec84f06a..124bf0a663283502deb03397343160d493a378b1
@@@ -79,6 -79,9 +79,6 @@@
  #include <linux/netlink.h>
  #include <linux/tcp.h>
  
 -int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 -EXPORT_SYMBOL(sysctl_ip_default_ttl);
 -
  static int
  ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            unsigned int mtu,
@@@ -1233,13 -1236,16 +1233,16 @@@ ssize_t      ip_append_page(struct sock *sk
        if (!skb)
                return -EINVAL;
  
-       cork->length += size;
        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
+               if (skb->ip_summed != CHECKSUM_PARTIAL)
+                       return -EOPNOTSUPP;
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }
+       cork->length += size;
  
        while (size > 0) {
                if (skb_is_gso(skb)) {
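
The ip_append_page() hunk fixes a UFO restriction together with an accounting-ordering bug: appending to a UDP skb that will be segmented by hardware now requires CHECKSUM_PARTIAL, and the cork->length charge moves below that check so an -EOPNOTSUPP bailout leaves the cork untouched. The general shape of the fix, as a sketch:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cork_sketch { long length; };

    /* Charge accounting only after every bailout check has passed,
     * mirroring the reordering in the hunk above. */
    static int append_sketch(struct cork_sketch *cork, long size,
                             bool csum_partial_ok)
    {
        if (!csum_partial_ok)
            return -EOPNOTSUPP;   /* no accounting side effect on failure */
        cork->length += size;
        return 0;
    }

    int main(void)
    {
        struct cork_sketch cork = { 0 };

        if (append_sketch(&cork, 1400, false) < 0)
            printf("rejected, length still %ld\n", cork.length);
        return 0;
    }
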
diff --combined net/ipv4/ip_tunnel.c
index dff8a05739a289d20844cd91ae21350fa689c47d,336e6892a93ce99aabf1794133c9567ee0effde7..6aad0192443d49966785f0de67ee30934775dfca
@@@ -68,6 -68,61 +68,6 @@@ static unsigned int ip_tunnel_hash(__be
                         IP_TNL_HASH_BITS);
  }
  
 -static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 -                           struct dst_entry *dst, __be32 saddr)
 -{
 -      struct dst_entry *old_dst;
 -
 -      dst_clone(dst);
 -      old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 -      dst_release(old_dst);
 -      idst->saddr = saddr;
 -}
 -
 -static noinline void tunnel_dst_set(struct ip_tunnel *t,
 -                         struct dst_entry *dst, __be32 saddr)
 -{
 -      __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
 -}
 -
 -static void tunnel_dst_reset(struct ip_tunnel *t)
 -{
 -      tunnel_dst_set(t, NULL, 0);
 -}
 -
 -void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
 -{
 -      int i;
 -
 -      for_each_possible_cpu(i)
 -              __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
 -}
 -EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 -
 -static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
 -                                      u32 cookie, __be32 *saddr)
 -{
 -      struct ip_tunnel_dst *idst;
 -      struct dst_entry *dst;
 -
 -      rcu_read_lock();
 -      idst = raw_cpu_ptr(t->dst_cache);
 -      dst = rcu_dereference(idst->dst);
 -      if (dst && !atomic_inc_not_zero(&dst->__refcnt))
 -              dst = NULL;
 -      if (dst) {
 -              if (!dst->obsolete || dst->ops->check(dst, cookie)) {
 -                      *saddr = idst->saddr;
 -              } else {
 -                      tunnel_dst_reset(t);
 -                      dst_release(dst);
 -                      dst = NULL;
 -              }
 -      }
 -      rcu_read_unlock();
 -      return (struct rtable *)dst;
 -}
 -
  static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
  {
@@@ -326,8 -381,7 +326,8 @@@ static int ip_tunnel_bind_dev(struct ne
  
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
 -                      tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 +                      dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
 +                                        fl4.saddr);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
@@@ -607,6 -661,8 +607,8 @@@ void ip_tunnel_xmit(struct sk_buff *skb
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);
  
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */
        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
                goto tx_error;
  
 -      rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
 +      rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
 +                       NULL;
  
        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);
                        goto tx_error;
                }
                if (connected)
 -                      tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 +                      dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
 +                                        fl4.saddr);
        }
  
        if (rt->dst.dev == dev) {
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;
  
-                       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
@@@ -784,7 -837,7 +785,7 @@@ static void ip_tunnel_update(struct ip_
                if (set_mtu)
                        dev->mtu = mtu;
        }
 -      ip_tunnel_dst_reset_all(t);
 +      dst_cache_reset(&t->dst_cache);
        netdev_state_change(dev);
  }
  
@@@ -923,7 -976,7 +924,7 @@@ static void ip_tunnel_dev_free(struct n
        struct ip_tunnel *tunnel = netdev_priv(dev);
  
        gro_cells_destroy(&tunnel->gro_cells);
 -      free_percpu(tunnel->dst_cache);
 +      dst_cache_destroy(&tunnel->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
  }
@@@ -1117,15 -1170,15 +1118,15 @@@ int ip_tunnel_init(struct net_device *d
        if (!dev->tstats)
                return -ENOMEM;
  
 -      tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
 -      if (!tunnel->dst_cache) {
 +      err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
 +      if (err) {
                free_percpu(dev->tstats);
 -              return -ENOMEM;
 +              return err;
        }
  
        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
 -              free_percpu(tunnel->dst_cache);
 +              dst_cache_destroy(&tunnel->dst_cache);
                free_percpu(dev->tstats);
                return err;
        }
@@@ -1155,7 -1208,7 +1156,7 @@@ void ip_tunnel_uninit(struct net_devic
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(itn, netdev_priv(dev));
  
 -      ip_tunnel_dst_reset_all(tunnel);
 +      dst_cache_reset(&tunnel->dst_cache);
  }
  EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
  
diff --combined net/ipv4/tcp_metrics.c
index c26241f3057b18d8e1d2aaece7d1e2f7f8399462,a726d7853ce53fe03b48b199ca909c02393cabcf..7b7eec4399069249ef949ef4b287e8cd0ef4df91
@@@ -369,7 -369,6 +369,7 @@@ void tcp_update_metrics(struct sock *sk
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 +      struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
 -                          tp->reordering != sysctl_tcp_reordering)
 +                          tp->reordering != net->ipv4.sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
@@@ -551,7 -550,7 +551,7 @@@ reset
         */
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
-               crtt /= 8 * USEC_PER_MSEC;
+               crtt /= 8 * USEC_PER_SEC / HZ;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
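
The one-line tcp_metrics.c fix corrects a unit conversion: crtt carries microseconds scaled by 8 (the srtt convention), while icsk_rto is in jiffies, so the right divisor is 8 * USEC_PER_SEC / HZ. The old 8 * USEC_PER_MSEC was only equivalent when HZ=1000. A worked check, assuming HZ=250 and a 100 ms cached RTT:

    #include <stdio.h>

    #define USEC_PER_SEC  1000000L
    #define USEC_PER_MSEC 1000L
    #define HZ            250L      /* assumption for this example */

    int main(void)
    {
        long crtt = 100000L << 3;   /* cached RTT: 100 ms, scaled by 8 */

        long fixed = crtt / (8 * USEC_PER_SEC / HZ);  /* 25 jiffies = 100 ms */
        long buggy = crtt / (8 * USEC_PER_MSEC);      /* 100 "jiffies" = 400 ms */

        printf("fixed=%ld jiffies, buggy=%ld jiffies\n", fixed, buggy);
        return 0;
    }
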
diff --combined net/ipv4/tcp_minisocks.c
index fadd8b978951817f75402af7a12f2b5681b00462,9b02af2139d3d3245ea952f643fc52e0ee92a9ab..ae90e4b34bd3c656850a79131b22febfb41cb9ad
@@@ -27,6 -27,9 +27,6 @@@
  #include <net/inet_common.h>
  #include <net/xfrm.h>
  
 -int sysctl_tcp_syncookies __read_mostly = 1;
 -EXPORT_SYMBOL(sysctl_tcp_syncookies);
 -
  int sysctl_tcp_abort_on_overflow __read_mostly;
  
  struct inet_timewait_death_row tcp_death_row = {
@@@ -452,7 -455,7 +452,7 @@@ struct sock *tcp_create_openreq_child(c
  
                newtp->rcv_wup = newtp->copied_seq =
                newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->segs_in = 0;
+               newtp->segs_in = 1;
  
                newtp->snd_sml = newtp->snd_una =
                newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@@ -812,6 -815,7 +812,7 @@@ int tcp_child_process(struct sock *pare
        int ret = 0;
        int state = child->sk_state;
  
+       tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
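
The tcp_minisocks.c changes adjust segs_in accounting for passively opened connections: the child socket starts at 1 (counting the handshake-completing ACK) and tcp_child_process() adds max_t(u16, 1, gso_segs), so a GRO/GSO aggregate counts as the segments it carries rather than as one. The counting rule in isolation, as a sketch:

    #include <stdio.h>

    /* Count one segment minimum; a GSO skb contributes its gso_segs. */
    static unsigned int segs_in_delta(unsigned short gso_segs)
    {
        return gso_segs > 1 ? gso_segs : 1;
    }

    int main(void)
    {
        unsigned int segs_in = 1;        /* handshake-completing ACK */

        segs_in += segs_in_delta(0);     /* plain skb, gso_segs == 0 */
        segs_in += segs_in_delta(4);     /* GRO aggregate of 4 segments */
        printf("segs_in=%u\n", segs_in); /* 6 */
        return 0;
    }
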
diff --combined net/ipv6/ip6_gre.c
index f7c9560b75facde132e394f54ef53d9598fbae2b,c0d4dc1c5ea4c31d8ebb4512bb55fcd177ce9f4e..4e636e60a360d5c22dc856c2c80d361127ba41b6
@@@ -360,7 -360,7 +360,7 @@@ static void ip6gre_tunnel_uninit(struc
        struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
  
        ip6gre_tunnel_unlink(ign, t);
 -      ip6_tnl_dst_reset(t);
 +      dst_cache_reset(&t->dst_cache);
        dev_put(dev);
  }
  
@@@ -633,7 -633,7 +633,7 @@@ static netdev_tx_t ip6gre_xmit2(struct 
        }
  
        if (!fl6->flowi6_mark)
 -              dst = ip6_tnl_dst_get(tunnel);
 +              dst = dst_cache_get(&tunnel->dst_cache);
  
        if (!dst) {
                dst = ip6_route_output(net, NULL, fl6);
        }
  
        if (!fl6->flowi6_mark && ndst)
 -              ip6_tnl_dst_set(tunnel, ndst);
 +              dst_cache_set_ip6(&tunnel->dst_cache, ndst, &fl6->saddr);
        skb_dst_set(skb, dst);
  
        proto = NEXTHDR_GRE;
@@@ -777,6 -777,8 +777,8 @@@ static inline int ip6gre_xmit_ipv4(stru
        __u32 mtu;
        int err;
  
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;
  
@@@ -1009,7 -1011,7 +1011,7 @@@ static int ip6gre_tnl_change(struct ip6
        t->parms.o_key = p->o_key;
        t->parms.i_flags = p->i_flags;
        t->parms.o_flags = p->o_flags;
 -      ip6_tnl_dst_reset(t);
 +      dst_cache_reset(&t->dst_cache);
        ip6gre_tnl_link_config(t, set_mtu);
        return 0;
  }
@@@ -1219,7 -1221,7 +1221,7 @@@ static void ip6gre_dev_free(struct net_
  {
        struct ip6_tnl *t = netdev_priv(dev);
  
 -      ip6_tnl_dst_destroy(t);
 +      dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
  }
@@@ -1257,7 -1259,7 +1259,7 @@@ static int ip6gre_tunnel_init_common(st
        if (!dev->tstats)
                return -ENOMEM;
  
 -      ret = ip6_tnl_dst_init(tunnel);
 +      ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (ret) {
                free_percpu(dev->tstats);
                dev->tstats = NULL;
diff --combined net/ipv6/ip6_tunnel.c
index 3f3aabd2f07b8dfe624e3035d5f3743078f36aff,6c5dfec7a3779601eb760eeeb975ae5e6db00bb7..eb2ac4bb09ce0fb0f9afb3fcc142aad80270a6cb
@@@ -122,6 -122,97 +122,6 @@@ static struct net_device_stats *ip6_get
        return &dev->stats;
  }
  
 -/*
 - * Locking : hash tables are protected by RCU and RTNL
 - */
 -
 -static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
 -                                  struct dst_entry *dst)
 -{
 -      write_seqlock_bh(&idst->lock);
 -      dst_release(rcu_dereference_protected(
 -                          idst->dst,
 -                          lockdep_is_held(&idst->lock.lock)));
 -      if (dst) {
 -              dst_hold(dst);
 -              idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
 -      } else {
 -              idst->cookie = 0;
 -      }
 -      rcu_assign_pointer(idst->dst, dst);
 -      write_sequnlock_bh(&idst->lock);
 -}
 -
 -struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
 -{
 -      struct ip6_tnl_dst *idst;
 -      struct dst_entry *dst;
 -      unsigned int seq;
 -      u32 cookie;
 -
 -      idst = raw_cpu_ptr(t->dst_cache);
 -
 -      rcu_read_lock();
 -      do {
 -              seq = read_seqbegin(&idst->lock);
 -              dst = rcu_dereference(idst->dst);
 -              cookie = idst->cookie;
 -      } while (read_seqretry(&idst->lock, seq));
 -
 -      if (dst && !atomic_inc_not_zero(&dst->__refcnt))
 -              dst = NULL;
 -      rcu_read_unlock();
 -
 -      if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
 -              ip6_tnl_per_cpu_dst_set(idst, NULL);
 -              dst_release(dst);
 -              dst = NULL;
 -      }
 -      return dst;
 -}
 -EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
 -
 -void ip6_tnl_dst_reset(struct ip6_tnl *t)
 -{
 -      int i;
 -
 -      for_each_possible_cpu(i)
 -              ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 -}
 -EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 -
 -void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
 -{
 -      ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
 -
 -}
 -EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
 -
 -void ip6_tnl_dst_destroy(struct ip6_tnl *t)
 -{
 -      if (!t->dst_cache)
 -              return;
 -
 -      ip6_tnl_dst_reset(t);
 -      free_percpu(t->dst_cache);
 -}
 -EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
 -
 -int ip6_tnl_dst_init(struct ip6_tnl *t)
 -{
 -      int i;
 -
 -      t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
 -      if (!t->dst_cache)
 -              return -ENOMEM;
 -
 -      for_each_possible_cpu(i)
 -              seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
 -
 -      return 0;
 -}
 -EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
 -
  /**
   * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
   *   @remote: the address of the tunnel exit-point
@@@ -238,7 -329,7 +238,7 @@@ static void ip6_dev_free(struct net_dev
  {
        struct ip6_tnl *t = netdev_priv(dev);
  
 -      ip6_tnl_dst_destroy(t);
 +      dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
  }
@@@ -371,7 -462,7 +371,7 @@@ ip6_tnl_dev_uninit(struct net_device *d
                RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
        else
                ip6_tnl_unlink(ip6n, t);
 -      ip6_tnl_dst_reset(t);
 +      dst_cache_reset(&t->dst_cache);
        dev_put(dev);
  }
  
@@@ -978,7 -1069,7 +978,7 @@@ static int ip6_tnl_xmit2(struct sk_buf
                memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
                neigh_release(neigh);
        } else if (!fl6->flowi6_mark)
 -              dst = ip6_tnl_dst_get(t);
 +              dst = dst_cache_get(&t->dst_cache);
  
        if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
                goto tx_err_link_failure;
        }
  
        if (!fl6->flowi6_mark && ndst)
 -              ip6_tnl_dst_set(t, ndst);
 +              dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
        skb_dst_set(skb, dst);
  
        skb->transport_header = skb->network_header;
@@@ -1089,6 -1180,8 +1089,8 @@@ ip4ip6_tnl_xmit(struct sk_buff *skb, st
        u8 tproto;
        int err;
  
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        tproto = ACCESS_ONCE(t->parms.proto);
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
@@@ -1275,7 -1368,7 +1277,7 @@@ ip6_tnl_change(struct ip6_tnl *t, cons
        t->parms.flowinfo = p->flowinfo;
        t->parms.link = p->link;
        t->parms.proto = p->proto;
 -      ip6_tnl_dst_reset(t);
 +      dst_cache_reset(&t->dst_cache);
        ip6_tnl_link_config(t);
        return 0;
  }
@@@ -1546,7 -1639,7 +1548,7 @@@ ip6_tnl_dev_init_gen(struct net_device 
        if (!dev->tstats)
                return -ENOMEM;
  
 -      ret = ip6_tnl_dst_init(t);
 +      ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
        if (ret) {
                free_percpu(dev->tstats);
                dev->tstats = NULL;
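
The memset(&(IPCB(skb)->opt), 0, ...) lines added on the 'net' side of this merge, here and in the ip_tunnel.c and ip6_gre.c hunks above, scrub the IPv4 control block before the skb enters the tunnel transmit path, so stale parsed-options state from an earlier layer cannot be misread during encapsulation. A toy sketch of the pattern; all *_sketch types are invented stand-ins for skb->cb and IPCB():

    #include <string.h>
    #include <stdio.h>

    /* Toy stand-ins: skb->cb is scratch space reused by every layer,
     * and IPCB() interprets it as IPv4 state including parsed options. */
    struct ip_options_sketch { int optlen; };
    struct inet_skb_parm_sketch { struct ip_options_sketch opt; };

    struct sk_buff_sketch { char cb[48]; };

    #define IPCB_SKETCH(skb) ((struct inet_skb_parm_sketch *)((skb)->cb))

    int main(void)
    {
        struct sk_buff_sketch skb;

        IPCB_SKETCH(&skb)->opt.optlen = 12;    /* stale state from rx path */

        /* what the tunnel xmit paths now do before encapsulating */
        memset(&(IPCB_SKETCH(&skb)->opt), 0, sizeof(IPCB_SKETCH(&skb)->opt));

        printf("optlen=%d\n", IPCB_SKETCH(&skb)->opt.optlen);  /* 0 */
        return 0;
    }
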
diff --combined net/ipv6/udp.c
index 0711f8fe4d44f52950a86c565df68cd61134953f,422dd014aa2ce9b27263eed4612ff156271d982f..fd25e447a5fa3fb5590f5979a43414ea9c85f079
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/slab.h>
  #include <asm/uaccess.h>
  
 +#include <net/addrconf.h>
  #include <net/ndisc.h>
  #include <net/protocol.h>
  #include <net/transp_v6.h>
@@@ -78,6 -77,49 +78,6 @@@ static u32 udp6_ehashfn(const struct ne
                               udp_ipv6_hash_secret + net_hash_mix(net));
  }
  
 -/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
 - *                          only, and any IPv4 addresses if not IPv6 only
 - * match_wildcard == false: addresses must be exactly the same, i.e.
 - *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 - *                          and 0.0.0.0 equals to 0.0.0.0 only
 - */
 -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
 -                       bool match_wildcard)
 -{
 -      const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
 -      int sk2_ipv6only = inet_v6_ipv6only(sk2);
 -      int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
 -      int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 -
 -      /* if both are mapped, treat as IPv4 */
 -      if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
 -              if (!sk2_ipv6only) {
 -                      if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr)
 -                              return 1;
 -                      if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr)
 -                              return match_wildcard;
 -              }
 -              return 0;
 -      }
 -
 -      if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
 -              return 1;
 -
 -      if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
 -          !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
 -              return 1;
 -
 -      if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
 -          !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
 -              return 1;
 -
 -      if (sk2_rcv_saddr6 &&
 -          ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
 -              return 1;
 -
 -      return 0;
 -}
 -
  static u32 udp6_portaddr_hash(const struct net *net,
                              const struct in6_addr *addr6,
                              unsigned int port)
@@@ -548,7 -590,6 +548,7 @@@ void __udp6_lib_err(struct sk_buff *skb
        const struct in6_addr *daddr = &hdr->daddr;
        struct udphdr *uh = (struct udphdr *)(skb->data+offset);
        struct sock *sk;
 +      int harderr;
        int err;
        struct net *net = dev_net(skb->dev);
  
                return;
        }
  
 +      harderr = icmpv6_err_convert(type, code, &err);
 +      np = inet6_sk(sk);
 +
        if (type == ICMPV6_PKT_TOOBIG) {
                if (!ip6_sk_accept_pmtu(sk))
                        goto out;
                ip6_sk_update_pmtu(skb, sk, info);
 +              if (np->pmtudisc != IPV6_PMTUDISC_DONT)
 +                      harderr = 1;
        }
        if (type == NDISC_REDIRECT) {
                ip6_sk_redirect(skb, sk);
                goto out;
        }
  
 -      np = inet6_sk(sk);
 -
 -      if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
 -              goto out;
 -
 -      if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
 -              goto out;
 -
 -      if (np->recverr)
 +      if (!np->recverr) {
 +              if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 +                      goto out;
 +      } else {
                ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 +      }
  
        sk->sk_err = err;
        sk->sk_error_report(sk);
@@@ -922,11 -962,9 +922,9 @@@ int __udp6_lib_rcv(struct sk_buff *skb
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
  
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
+               /* a return value > 0 means to resubmit the input */
                if (ret > 0)
-                       return -ret;
+                       return ret;
  
                return 0;
        }
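
The __udp6_lib_err() rework computes the hard-error status once via icmpv6_err_convert(), upgrades PKT_TOOBIG to a hard error unless pmtudisc is IPV6_PMTUDISC_DONT, and then applies one delivery rule: without IPV6_RECVERR the error reaches the socket only when it is hard and the socket is connected. That decision as a small predicate; a sketch, not the kernel function:

    #include <stdbool.h>
    #include <stdio.h>

    /* Deliver the error to the socket?  Mirrors the restructured test:
     * recverr sockets see everything (queued as extended errors);
     * otherwise only hard errors on established sockets are reported. */
    static bool deliver_err(bool recverr, bool harderr, bool established)
    {
        if (recverr)
            return true;
        return harderr && established;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               deliver_err(false, true, true),    /* 1: hard err, connected */
               deliver_err(false, true, false),   /* 0: not connected */
               deliver_err(true, false, false));  /* 1: recverr set */
        return 0;
    }
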
diff --combined net/mac80211/agg-rx.c
index 1b8a5caa221eb6a2554a95fdfb2a53ea7f17d9c3,367784be5df20f26fd3940c4608f1ea715bfddd6..3a8f881b22f10c983ab354a4393157e090fd6d3a
@@@ -7,7 -7,6 +7,7 @@@
   * Copyright 2006-2007        Jiri Benc <jbenc@suse.cz>
   * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
   * Copyright 2007-2010, Intel Corporation
 + * Copyright(c) 2015 Intel Deutschland GmbH
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
@@@ -62,25 -61,16 +62,25 @@@ void ___ieee80211_stop_rx_ba_session(st
  {
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_rx *tid_rx;
 +      struct ieee80211_ampdu_params params = {
 +              .sta = &sta->sta,
 +              .action = IEEE80211_AMPDU_RX_STOP,
 +              .tid = tid,
 +              .amsdu = false,
 +              .timeout = 0,
 +              .ssn = 0,
 +      };
  
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
  
        tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
                                        lockdep_is_held(&sta->ampdu_mlme.mtx));
  
 -      if (!tid_rx)
 +      if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
                return;
  
        RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
 +      __clear_bit(tid, sta->ampdu_mlme.agg_session_valid);
  
        ht_dbg(sta->sdata,
               "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
@@@ -88,7 -78,8 +88,7 @@@
       initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
               (int)reason);
  
 -      if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
 -                           &sta->sta, tid, NULL, 0, false))
 +      if (drv_ampdu_action(local, sta->sdata, &params))
                sdata_info(sta->sdata,
                           "HW problem - can not stop rx aggregation for %pM tid %d\n",
                           sta->sta.addr, tid);
                ieee80211_send_delba(sta->sdata, sta->sta.addr,
                                     tid, WLAN_BACK_RECIPIENT, reason);
  
 +      /*
 +       * return here in case tid_rx is not assigned - which will happen if
 +       * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set.
 +       */
 +      if (!tid_rx)
 +              return;
 +
        del_timer_sync(&tid_rx->session_timer);
  
        /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
@@@ -253,15 -237,6 +253,15 @@@ void __ieee80211_start_rx_ba_session(st
  {
        struct ieee80211_local *local = sta->sdata->local;
        struct tid_ampdu_rx *tid_agg_rx;
 +      struct ieee80211_ampdu_params params = {
 +              .sta = &sta->sta,
 +              .action = IEEE80211_AMPDU_RX_START,
 +              .tid = tid,
 +              .amsdu = false,
 +              .timeout = timeout,
 +              .ssn = start_seq_num,
 +      };
 +
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
  
        /* make sure the size doesn't exceed the maximum supported by the hw */
        if (buf_size > local->hw.max_rx_aggregation_subframes)
                buf_size = local->hw.max_rx_aggregation_subframes;
 +      params.buf_size = buf_size;
  
        /* examine state machine */
        mutex_lock(&sta->ampdu_mlme.mtx);
  
 -      if (sta->ampdu_mlme.tid_rx[tid]) {
 +      if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
                ht_dbg_ratelimited(sta->sdata,
                                   "unexpected AddBA Req from %pM on tid %u\n",
                                   sta->sta.addr, tid);
                                                false);
        }
  
 +      if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) {
 +              ret = drv_ampdu_action(local, sta->sdata, &params);
 +              ht_dbg(sta->sdata,
 +                     "Rx A-MPDU request on %pM tid %d result %d\n",
 +                     sta->sta.addr, tid, ret);
 +              if (!ret)
 +                      status = WLAN_STATUS_SUCCESS;
 +              goto end;
 +      }
 +
        /* prepare A-MPDU MLME for Rx aggregation */
-       tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+       tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
        if (!tid_agg_rx)
                goto end;
  
        for (i = 0; i < buf_size; i++)
                __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
  
 -      ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
 -                             &sta->sta, tid, &start_seq_num, 0, false);
 +      ret = drv_ampdu_action(local, sta->sdata, &params);
        ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
               sta->sta.addr, tid, ret);
        if (ret) {
        tid_agg_rx->timeout = timeout;
        tid_agg_rx->stored_mpdu_num = 0;
        tid_agg_rx->auto_seq = auto_seq;
 +      tid_agg_rx->reorder_buf_filtered = 0;
        status = WLAN_STATUS_SUCCESS;
  
        /* activate it for RX */
        }
  
  end:
 +      if (status == WLAN_STATUS_SUCCESS)
 +              __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
        mutex_unlock(&sta->ampdu_mlme.mtx);
  
  end_no_lock:
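
The agg-rx.c hunks above replace drv_ampdu_action()'s six scalar arguments with a single struct ieee80211_ampdu_params, built with designated initializers at each call site and with buf_size filled in after clamping to the hardware limit. The refactoring pattern in miniature; field names mirror the struct introduced above, everything else is a stand-in:

    #include <stdbool.h>
    #include <stdio.h>

    enum ampdu_action_sketch { AMPDU_RX_START, AMPDU_RX_STOP };

    /* Mirrors the shape of struct ieee80211_ampdu_params above */
    struct ampdu_params_sketch {
        enum ampdu_action_sketch action;
        unsigned char tid;
        bool amsdu;
        unsigned short timeout;
        unsigned short ssn;
        unsigned short buf_size;
    };

    /* One struct argument instead of six scalars; adding a field no
     * longer touches every driver callback signature. */
    static int drv_ampdu_action_sketch(const struct ampdu_params_sketch *p)
    {
        printf("action=%d tid=%u ssn=%u buf_size=%u\n",
               (int)p->action, (unsigned)p->tid,
               (unsigned)p->ssn, (unsigned)p->buf_size);
        return 0;
    }

    int main(void)
    {
        struct ampdu_params_sketch params = {
            .action = AMPDU_RX_START,
            .tid = 5,
            .amsdu = false,
            .timeout = 0,
            .ssn = 100,
        };

        params.buf_size = 64;   /* clamped to the hw limit first, as above */
        return drv_ampdu_action_sketch(&params);
    }
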
diff --combined net/mac80211/ieee80211_i.h
index 1630975c89f159c613668692290450a7d60ed9ab,f006f4a44c0e6fbcdec025dcaa5450d194403bd9..804575ff7af506e73ef50d6ebc32326219b99082
@@@ -92,7 -92,7 +92,7 @@@ struct ieee80211_fragment_entry 
        u16 extra_len;
        u16 last_frag;
        u8 rx_queue;
-       bool ccmp; /* Whether fragments were encrypted with CCMP */
+       bool check_sequential_pn; /* needed for CCMP/GCMP */
        u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
  };
  
@@@ -716,6 -716,7 +716,6 @@@ struct ieee80211_if_mesh 
   *    back to wireless media and to the local net stack.
   * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
   * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
 - * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
   */
  enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
        IEEE80211_SDATA_IN_DRIVER               = BIT(5),
 -      IEEE80211_SDATA_MU_MIMO_OWNER           = BIT(6),
  };
  
  /**
@@@ -802,7 -804,6 +802,7 @@@ enum txq_info_flags 
  struct txq_info {
        struct sk_buff_head queue;
        unsigned long flags;
 +      unsigned long byte_cnt;
  
        /* keep last! */
        struct ieee80211_txq txq;
@@@ -1465,13 -1466,7 +1465,13 @@@ ieee80211_have_rx_timestamp(struct ieee
  {
        WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
                     status->flag & RX_FLAG_MACTIME_END);
 -      return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
 +      if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
 +              return true;
 +      /* can't handle HT/VHT preamble yet */
 +      if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
 +          !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT)))
 +              return true;
 +      return false;
  }
  
  u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
@@@ -1719,8 -1714,6 +1719,8 @@@ ieee80211_vht_cap_ie_to_sta_vht_cap(str
  enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
  enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
  void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 +void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
 +                               struct ieee80211_mgmt *mgmt);
  u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                    struct sta_info *sta, u8 opmode,
                                  enum ieee80211_band band);
@@@ -1836,6 -1829,20 +1836,6 @@@ static inline void ieee802_11_parse_ele
        ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
  }
  
 -static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
 -{
 -      struct sk_buff *tail = skb_peek_tail(frames);
 -      struct ieee80211_rx_status *status;
 -
 -      if (!tail)
 -              return false;
 -
 -      status = IEEE80211_SKB_RXCB(tail);
 -      if (status->flag & RX_FLAG_AMSDU_MORE)
 -              return false;
 -
 -      return true;
 -}
  
  extern const int ieee802_1d_to_ac[8];
  
@@@ -1979,10 -1986,12 +1979,10 @@@ int ieee80211_add_ext_srates_ie(struct 
  u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
  
  /* channel management */
 -void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
 -                                const struct ieee80211_ht_operation *ht_oper,
 -                                struct cfg80211_chan_def *chandef);
 -void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
 -                                 const struct ieee80211_vht_operation *oper,
 -                                 struct cfg80211_chan_def *chandef);
 +bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
 +                             struct cfg80211_chan_def *chandef);
 +bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
 +                              struct cfg80211_chan_def *chandef);
  u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
  
  int __must_check
diff --combined net/mac80211/rx.c
index 5690e4c67486b88a23bb9724f6cfeb6ce3181410,60d093f40f1d16de32b1bb962b25ddb12f9a1b72..dc27becb9b71ec6ab3c2f0b94b57c278296f8157
@@@ -4,7 -4,6 +4,7 @@@
   * Copyright 2006-2007        Jiri Benc <jbenc@suse.cz>
   * Copyright 2007-2010        Johannes Berg <johannes@sipsolutions.net>
   * Copyright 2013-2014  Intel Mobile Communications GmbH
 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
@@@ -19,7 -18,6 +19,7 @@@
  #include <linux/etherdevice.h>
  #include <linux/rcupdate.h>
  #include <linux/export.h>
 +#include <linux/bitops.h>
  #include <net/mac80211.h>
  #include <net/ieee80211_radiotap.h>
  #include <asm/unaligned.h>
@@@ -124,8 -122,7 +124,8 @@@ static inline bool should_drop_frame(st
        hdr = (void *)(skb->data + rtap_vendor_space);
  
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
 -                          RX_FLAG_FAILED_PLCP_CRC))
 +                          RX_FLAG_FAILED_PLCP_CRC |
 +                          RX_FLAG_ONLY_MONITOR))
                return true;
  
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@@ -510,7 -507,7 +510,7 @@@ ieee80211_rx_monitor(struct ieee80211_l
                return NULL;
        }
  
 -      if (!local->monitors) {
 +      if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
                if (should_drop_frame(origskb, present_fcs_len,
                                      rtap_vendor_space)) {
                        dev_kfree_skb(origskb);
@@@ -800,26 -797,6 +800,26 @@@ static ieee80211_rx_result ieee80211_rx
        return RX_CONTINUE;
  }
  
 +static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
 +                                            int index)
 +{
 +      struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
 +      struct sk_buff *tail = skb_peek_tail(frames);
 +      struct ieee80211_rx_status *status;
 +
 +      if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
 +              return true;
 +
 +      if (!tail)
 +              return false;
 +
 +      status = IEEE80211_SKB_RXCB(tail);
 +      if (status->flag & RX_FLAG_AMSDU_MORE)
 +              return false;
 +
 +      return true;
 +}
 +
  static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
                                            struct tid_ampdu_rx *tid_agg_rx,
                                            int index,
        if (skb_queue_empty(skb_list))
                goto no_frame;
  
 -      if (!ieee80211_rx_reorder_ready(skb_list)) {
 +      if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                __skb_queue_purge(skb_list);
                goto no_frame;
        }
        }
  
  no_frame:
 +      tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
        tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
  }
  
@@@ -889,7 -865,7 +889,7 @@@ static void ieee80211_sta_reorder_relea
  
        /* release the buffer until next missing frame */
        index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 -      if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
 +      if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
            tid_agg_rx->stored_mpdu_num) {
                /*
                 * No buffers ready to be released, but check whether any
                int skipped = 1;
                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
                     j = (j + 1) % tid_agg_rx->buf_size) {
 -                      if (!ieee80211_rx_reorder_ready(
 -                                      &tid_agg_rx->reorder_buf[j])) {
 +                      if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
                                skipped++;
                                continue;
                        }
                                 skipped) & IEEE80211_SN_MASK;
                        skipped = 0;
                }
 -      } else while (ieee80211_rx_reorder_ready(
 -                              &tid_agg_rx->reorder_buf[index])) {
 +      } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
                                                frames);
                index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
  
                for (; j != (index - 1) % tid_agg_rx->buf_size;
                     j = (j + 1) % tid_agg_rx->buf_size) {
 -                      if (ieee80211_rx_reorder_ready(
 -                                      &tid_agg_rx->reorder_buf[j]))
 +                      if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
                                break;
                }
  
@@@ -1007,7 -986,7 +1007,7 @@@ static bool ieee80211_sta_manage_reorde
        index = mpdu_seq_num % tid_agg_rx->buf_size;
  
        /* check if we already stored this frame */
 -      if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
 +      if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                dev_kfree_skb(skb);
                goto out;
        }
@@@ -1120,9 -1099,6 +1120,9 @@@ ieee80211_rx_h_check_dup(struct ieee802
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  
 +      if (status->flag & RX_FLAG_DUP_VALIDATED)
 +              return RX_CONTINUE;
 +
        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
@@@ -1777,7 -1753,7 +1777,7 @@@ ieee80211_reassemble_add(struct ieee802
        entry->seq = seq;
        entry->rx_queue = rx_queue;
        entry->last_frag = frag;
-       entry->ccmp = 0;
+       entry->check_sequential_pn = false;
        entry->extra_len = 0;
  
        return entry;
@@@ -1873,15 -1849,27 +1873,27 @@@ ieee80211_rx_h_defragment(struct ieee80
                                                 rx->seqno_idx, &(rx->skb));
                if (rx->key &&
                    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
-                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
                    ieee80211_has_protected(fc)) {
                        int queue = rx->security_idx;
-                       /* Store CCMP PN so that we can verify that the next
-                        * fragment has a sequential PN value. */
-                       entry->ccmp = 1;
+                       /* Store CCMP/GCMP PN so that we can verify that the
+                        * next fragment has a sequential PN value.
+                        */
+                       entry->check_sequential_pn = true;
                        memcpy(entry->last_pn,
                               rx->key->u.ccmp.rx_pn[queue],
                               IEEE80211_CCMP_PN_LEN);
+                       BUILD_BUG_ON(offsetof(struct ieee80211_key,
+                                             u.ccmp.rx_pn) !=
+                                    offsetof(struct ieee80211_key,
+                                             u.gcmp.rx_pn));
+                       BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
+                                    sizeof(rx->key->u.gcmp.rx_pn[queue]));
+                       BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+                                    IEEE80211_GCMP_PN_LEN);
                }
                return RX_QUEUED;
        }
                return RX_DROP_MONITOR;
        }
  
-       /* Verify that MPDUs within one MSDU have sequential PN values.
-        * (IEEE 802.11i, 8.3.3.4.5) */
-       if (entry->ccmp) {
+       /* "The receiver shall discard MSDUs and MMPDUs whose constituent
+        *  MPDU PN values are not incrementing in steps of 1."
+        * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
+        * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
+        */
+       if (entry->check_sequential_pn) {
                int i;
                u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
                int queue;
                if (!rx->key ||
                    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
-                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
                        return RX_DROP_UNUSABLE;
                memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
                for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@@ -2223,6 -2217,9 +2241,6 @@@ ieee80211_rx_h_amsdu(struct ieee80211_r
        skb->dev = dev;
        __skb_queue_head_init(&frame_list);
  
 -      if (skb_linearize(skb))
 -              return RX_DROP_UNUSABLE;
 -
        ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
                                 rx->sdata->vif.type,
                                 rx->local->hw.extra_tx_headroom, true);
@@@ -2252,7 -2249,7 +2270,7 @@@ ieee80211_rx_h_mesh_fwding(struct ieee8
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 -      u16 q, hdrlen;
 +      u16 ac, q, hdrlen;
  
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
                        spin_lock_bh(&mppath->state_lock);
                        if (!ether_addr_equal(mppath->mpp, mpp_addr))
                                memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
 +                      mppath->exp_time = jiffies;
                        spin_unlock_bh(&mppath->state_lock);
                }
                rcu_read_unlock();
            ether_addr_equal(sdata->vif.addr, hdr->addr3))
                return RX_CONTINUE;
  
 -      q = ieee80211_select_queue_80211(sdata, skb, hdr);
 +      ac = ieee80211_select_queue_80211(sdata, skb, hdr);
 +      q = sdata->vif.hw_queue[ac];
        if (ieee80211_queue_stopped(&local->hw, q)) {
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
                return RX_DROP_MONITOR;
@@@ -2761,11 -2756,6 +2779,11 @@@ ieee80211_rx_h_action(struct ieee80211_
                                                    opmode, status->band);
                        goto handled;
                }
 +              case WLAN_VHT_ACTION_GROUPID_MGMT: {
 +                      if (len < IEEE80211_MIN_ACTION_SIZE + 25)
 +                              goto invalid;
 +                      goto queue;
 +              }
                default:
                        break;
                }
@@@ -3101,7 -3091,7 +3119,7 @@@ static void ieee80211_rx_cooked_monitor
        ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
                                         false);
  
 -      skb_set_mac_header(skb, 0);
 +      skb_reset_mac_header(skb);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->pkt_type = PACKET_OTHERHOST;
        skb->protocol = htons(ETH_P_802_2);
@@@ -3303,85 -3293,6 +3321,85 @@@ void ieee80211_release_reorder_timeout(
        ieee80211_rx_handlers(&rx, &frames);
  }
  
 +void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
 +                                        u16 ssn, u64 filtered,
 +                                        u16 received_mpdus)
 +{
 +      struct sta_info *sta;
 +      struct tid_ampdu_rx *tid_agg_rx;
 +      struct sk_buff_head frames;
 +      struct ieee80211_rx_data rx = {
 +              /* This is OK -- must be QoS data frame */
 +              .security_idx = tid,
 +              .seqno_idx = tid,
 +      };
 +      int i, diff;
 +
 +      if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
 +              return;
 +
 +      __skb_queue_head_init(&frames);
 +
 +      sta = container_of(pubsta, struct sta_info, sta);
 +
 +      rx.sta = sta;
 +      rx.sdata = sta->sdata;
 +      rx.local = sta->local;
 +
 +      rcu_read_lock();
 +      tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
 +      if (!tid_agg_rx)
 +              goto out;
 +
 +      spin_lock_bh(&tid_agg_rx->reorder_lock);
 +
 +      if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
 +              int release;
 +
 +              /* release all frames in the reorder buffer */
 +              release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
 +                         IEEE80211_SN_MODULO;
 +              ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
 +                                               release, &frames);
 +              /* update ssn to match received ssn */
 +              tid_agg_rx->head_seq_num = ssn;
 +      } else {
 +              ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
 +                                               &frames);
 +      }
 +
 +      /* handle the case that received ssn is behind the mac ssn.
 +       * it can be tid_agg_rx->buf_size behind and still be valid */
 +      diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
 +      if (diff >= tid_agg_rx->buf_size) {
 +              tid_agg_rx->reorder_buf_filtered = 0;
 +              goto release;
 +      }
 +      filtered = filtered >> diff;
 +      ssn += diff;
 +
 +      /* update bitmap */
 +      for (i = 0; i < tid_agg_rx->buf_size; i++) {
 +              int index = (ssn + i) % tid_agg_rx->buf_size;
 +
 +              tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
 +              if (filtered & BIT_ULL(i))
 +                      tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
 +      }
 +
 +      /* now process also frames that the filter marking released */
 +      ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
 +
 +release:
 +      spin_unlock_bh(&tid_agg_rx->reorder_lock);
 +
 +      ieee80211_rx_handlers(&rx, &frames);
 +
 + out:
 +      rcu_read_unlock();
 +}
 +EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
 +
  /* main receive path */
  
  static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                                return false;
                        /* ignore action frames to TDLS-peers */
                        if (ieee80211_is_action(hdr->frame_control) &&
+                           !is_broadcast_ether_addr(bssid) &&
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
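
The rx.c side teaches the reorder machinery about firmware-filtered frames: tid_agg_rx->reorder_buf_filtered is a u64 bitmap over the (at most 64-slot) reorder window, a set bit makes ieee80211_rx_reorder_ready() report the slot releasable even though no skb was buffered, and ieee80211_mark_rx_ba_filtered_frames() shifts the driver-supplied bitmap into window positions. The bitmap bookkeeping on its own, as a sketch:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BIT_ULL_SKETCH(n) (1ULL << (n))

    /* Slot is "ready" if marked filtered, even with no buffered frame
     * (the skb-based checks of the real function are elided). */
    static bool reorder_ready_sketch(uint64_t filtered, int index, bool has_skb)
    {
        if (filtered & BIT_ULL_SKETCH(index))
            return true;
        return has_skb;
    }

    int main(void)
    {
        uint64_t filtered = 0;
        int buf_size = 64, ssn = 10, i;
        uint64_t driver_bitmap = 0x5;   /* frames ssn+0 and ssn+2 filtered */

        /* place the driver's bitmap into reorder-window slot positions */
        for (i = 0; i < buf_size; i++) {
            int index = (ssn + i) % buf_size;

            filtered &= ~BIT_ULL_SKETCH(index);
            if (driver_bitmap & BIT_ULL_SKETCH(i))
                filtered |= BIT_ULL_SKETCH(index);
        }

        printf("slot 10 ready: %d\n", reorder_ready_sketch(filtered, 10, false));
        printf("slot 11 ready: %d\n", reorder_ready_sketch(filtered, 11, false));
        return 0;
    }
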
diff --combined net/sched/act_ipt.c
index 89c41a1f358945e9ea4b04f5537df4f286e3e809,6b70399ab78121e723efc439b27122f18b52a206..350e134cffb32b04f3e4c2b4b3917051cd55b456
  
  #define IPT_TAB_MASK     15
  
 +static int ipt_net_id;
 +
 +static int xt_net_id;
 +
  static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
  {
        struct xt_tgchk_param par;
@@@ -66,6 -62,7 +66,7 @@@ static void ipt_destroy_target(struct x
        struct xt_tgdtor_param par = {
                .target   = t->u.kernel.target,
                .targinfo = t->data,
+               .family   = NFPROTO_IPV4,
        };
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
@@@ -87,9 -84,8 +88,9 @@@ static const struct nla_policy ipt_poli
        [TCA_IPT_TARG]  = { .len = sizeof(struct xt_entry_target) },
  };
  
 -static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 -                      struct tc_action *a, int ovr, int bind)
 +static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
 +                        struct nlattr *est, struct tc_action *a, int ovr,
 +                        int bind)
  {
        struct nlattr *tb[TCA_IPT_MAX + 1];
        struct tcf_ipt *ipt;
        if (tb[TCA_IPT_INDEX] != NULL)
                index = nla_get_u32(tb[TCA_IPT_INDEX]);
  
 -      if (!tcf_hash_check(index, a, bind) ) {
 -              ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
 +      if (!tcf_hash_check(tn, index, a, bind)) {
 +              ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
 +                                    false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        ipt->tcfi_hook  = hook;
        spin_unlock_bh(&ipt->tcf_lock);
        if (ret == ACT_P_CREATED)
 -              tcf_hash_insert(a);
 +              tcf_hash_insert(tn, a);
        return ret;
  
  err3:
@@@ -176,24 -171,6 +177,24 @@@ err1
        return err;
  }
  
 +static int tcf_ipt_init(struct net *net, struct nlattr *nla,
 +                      struct nlattr *est, struct tc_action *a, int ovr,
 +                      int bind)
 +{
 +      struct tc_action_net *tn = net_generic(net, ipt_net_id);
 +
 +      return __tcf_ipt_init(tn, nla, est, a, ovr, bind);
 +}
 +
 +static int tcf_xt_init(struct net *net, struct nlattr *nla,
 +                     struct nlattr *est, struct tc_action *a, int ovr,
 +                     int bind)
 +{
 +      struct tc_action_net *tn = net_generic(net, xt_net_id);
 +
 +      return __tcf_ipt_init(tn, nla, est, a, ovr, bind);
 +}
 +
  static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
                   struct tcf_result *res)
  {
        par.hooknum  = ipt->tcfi_hook;
        par.target   = ipt->tcfi_t->u.kernel.target;
        par.targinfo = ipt->tcfi_t->data;
+       par.family   = NFPROTO_IPV4;
        ret = par.target->target(skb, &par);
  
        switch (ret) {
@@@ -284,22 -262,6 +286,22 @@@ nla_put_failure
        return -1;
  }
  
 +static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
 +                        struct netlink_callback *cb, int type,
 +                        struct tc_action *a)
 +{
 +      struct tc_action_net *tn = net_generic(net, ipt_net_id);
 +
 +      return tcf_generic_walker(tn, skb, cb, type, a);
 +}
 +
 +static int tcf_ipt_search(struct net *net, struct tc_action *a, u32 index)
 +{
 +      struct tc_action_net *tn = net_generic(net, ipt_net_id);
 +
 +      return tcf_hash_search(tn, a, index);
 +}
 +
  static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
        .type           =       TCA_ACT_IPT,
        .dump           =       tcf_ipt_dump,
        .cleanup        =       tcf_ipt_release,
        .init           =       tcf_ipt_init,
 +      .walk           =       tcf_ipt_walker,
 +      .lookup         =       tcf_ipt_search,
 +};
 +
 +static __net_init int ipt_init_net(struct net *net)
 +{
 +      struct tc_action_net *tn = net_generic(net, ipt_net_id);
 +
 +      return tc_action_net_init(tn, &act_ipt_ops, IPT_TAB_MASK);
 +}
 +
 +static void __net_exit ipt_exit_net(struct net *net)
 +{
 +      struct tc_action_net *tn = net_generic(net, ipt_net_id);
 +
 +      tc_action_net_exit(tn);
 +}
 +
 +static struct pernet_operations ipt_net_ops = {
 +      .init = ipt_init_net,
 +      .exit = ipt_exit_net,
 +      .id   = &ipt_net_id,
 +      .size = sizeof(struct tc_action_net),
  };
  
 +static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
 +                       struct netlink_callback *cb, int type,
 +                       struct tc_action *a)
 +{
 +      struct tc_action_net *tn = net_generic(net, xt_net_id);
 +
 +      return tcf_generic_walker(tn, skb, cb, type, a);
 +}
 +
 +static int tcf_xt_search(struct net *net, struct tc_action *a, u32 index)
 +{
 +      struct tc_action_net *tn = net_generic(net, xt_net_id);
 +
 +      return tcf_hash_search(tn, a, index);
 +}
 +
  static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
        .type           =       TCA_ACT_XT,
        .act            =       tcf_ipt,
        .dump           =       tcf_ipt_dump,
        .cleanup        =       tcf_ipt_release,
 -      .init           =       tcf_ipt_init,
 +      .init           =       tcf_xt_init,
 +      .walk           =       tcf_xt_walker,
 +      .lookup         =       tcf_xt_search,
 +};
 +
 +static __net_init int xt_init_net(struct net *net)
 +{
 +      struct tc_action_net *tn = net_generic(net, xt_net_id);
 +
 +      return tc_action_net_init(tn, &act_xt_ops, IPT_TAB_MASK);
 +}
 +
 +static void __net_exit xt_exit_net(struct net *net)
 +{
 +      struct tc_action_net *tn = net_generic(net, xt_net_id);
 +
 +      tc_action_net_exit(tn);
 +}
 +
 +static struct pernet_operations xt_net_ops = {
 +      .init = xt_init_net,
 +      .exit = xt_exit_net,
 +      .id   = &xt_net_id,
 +      .size = sizeof(struct tc_action_net),
  };
  
  MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
@@@ -391,13 -291,12 +393,13 @@@ static int __init ipt_init_module(void
  {
        int ret1, ret2;
  
 -      ret1 = tcf_register_action(&act_xt_ops, IPT_TAB_MASK);
 +      ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
        if (ret1 < 0)
 -              printk("Failed to load xt action\n");
 -      ret2 = tcf_register_action(&act_ipt_ops, IPT_TAB_MASK);
 +              pr_err("Failed to load xt action\n");
 +
 +      ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
        if (ret2 < 0)
 -              printk("Failed to load ipt action\n");
 +              pr_err("Failed to load ipt action\n");
  
        if (ret1 < 0 && ret2 < 0) {
                return ret1;
  
  static void __exit ipt_cleanup_module(void)
  {
 -      tcf_unregister_action(&act_xt_ops);
 -      tcf_unregister_action(&act_ipt_ops);
 +      tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
 +      tcf_unregister_action(&act_xt_ops, &xt_net_ops);
  }
  
  module_init(ipt_init_module);
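
The act_ipt.c changes convert the "ipt" and "xt" actions to per-network-namespace state: each action carries its own net_id and pernet_operations, every callback resolves its table with net_generic(), and tcf_register_action()/tcf_unregister_action() now take the pernet ops rather than a hash-table mask. A sketch of the boilerplate a converted action follows; "foo", act_foo_ops and FOO_TAB_MASK are hypothetical stand-ins, not part of this patch:

    static int foo_net_id;

    static __net_init int foo_init_net(struct net *net)
    {
            struct tc_action_net *tn = net_generic(net, foo_net_id);

            return tc_action_net_init(tn, &act_foo_ops, FOO_TAB_MASK);
    }

    static void __net_exit foo_exit_net(struct net *net)
    {
            tc_action_net_exit(net_generic(net, foo_net_id));
    }

    static struct pernet_operations foo_net_ops = {
            .init = foo_init_net,
            .exit = foo_exit_net,
            .id   = &foo_net_id,
            .size = sizeof(struct tc_action_net),
    };

    /* module init then pairs the ops with its pernet state:
     *     tcf_register_action(&act_foo_ops, &foo_net_ops);
     */
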
diff --combined net/sctp/proc.c
index cfc3c7101a38b974bcfecb4b4a086a4550143414,963dffcc2618b0fa5d186460bf12960c21f9babe..5cfac8d5d3b39b7f1aa1b82812dd02eaca45f337
@@@ -161,6 -161,7 +161,6 @@@ static void sctp_seq_dump_remote_addrs(
        struct sctp_af *af;
  
        primary = &assoc->peer.primary_addr;
 -      rcu_read_lock();
        list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
                        transports) {
                addr = &transport->ipaddr;
                }
                af->seq_dump_addr(seq, addr);
        }
 -      rcu_read_unlock();
  }
  
  static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
@@@ -480,7 -482,7 +480,7 @@@ static void sctp_remaddr_seq_stop(struc
  static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
  {
        struct sctp_association *assoc;
-       struct sctp_transport *tsp;
+       struct sctp_transport *transport, *tsp;
  
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
                return 0;
        }
  
-       tsp = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(tsp))
+       transport = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(transport))
                return 0;
-       assoc = tsp->asoc;
+       assoc = transport->asoc;
  
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
                                transports) {
                seq_printf(seq, "\n");
        }
  
-       sctp_transport_put(tsp);
+       sctp_transport_put(transport);
  
        return 0;
  }
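
The sctp/proc.c hunk fixes a reference-count imbalance in sctp_remaddr_seq_show(): tsp served both as the transport taken with sctp_transport_hold() and as the list_for_each_entry_rcu() cursor, so the final sctp_transport_put() released whichever peer the loop stopped on instead of the one originally held. Splitting the roles into transport (held and put) and tsp (cursor) restores the pairing; the sctp_seq_dump_remote_addrs() hunk also drops a nested rcu_read_lock()/unlock() pair, apparently because its callers already run under the RCU read lock. Condensed sketch of the broken pattern, with dump_one() as a hypothetical stand-in for the seq_printf body:

    if (!sctp_transport_hold(tsp))
            return 0;
    list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
                            transports)
            dump_one(tsp);          /* the loop reuses tsp as its cursor */
    sctp_transport_put(tsp);        /* so the wrong transport gets put */
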
diff --combined net/tipc/socket.c
index 56b8a96c2257b17b0168dbad2b695f816e87a39a,4d420bb273960cd6eac206753f25435970f2724d..3eeb50a27b89b6d9607b51b80a5ed5ce715235e4
@@@ -42,7 -42,6 +42,7 @@@
  #include "name_distr.h"
  #include "socket.h"
  #include "bcast.h"
 +#include "netlink.h"
  
  #define SS_LISTENING          -1      /* socket is listening */
  #define SS_READY              -2      /* socket is connectionless */
@@@ -127,6 -126,14 +127,6 @@@ static const struct proto_ops stream_op
  static const struct proto_ops msg_ops;
  static struct proto tipc_proto;
  
 -static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
 -      [TIPC_NLA_SOCK_UNSPEC]          = { .type = NLA_UNSPEC },
 -      [TIPC_NLA_SOCK_ADDR]            = { .type = NLA_U32 },
 -      [TIPC_NLA_SOCK_REF]             = { .type = NLA_U32 },
 -      [TIPC_NLA_SOCK_CON]             = { .type = NLA_NESTED },
 -      [TIPC_NLA_SOCK_HAS_PUBL]        = { .type = NLA_FLAG }
 -};
 -
  static const struct rhashtable_params tsk_rht_params;
  
  /*
@@@ -666,7 -673,7 +666,7 @@@ static int tipc_sendmcast(struct  socke
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        struct iov_iter save = msg->msg_iter;
        uint mtu;
        int rc;
        msg_set_nameupper(mhdr, seq->upper);
        msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
  
+       skb_queue_head_init(&pktchain);
  new_mtu:
        mtu = tipc_bcast_get_mtu(net);
-       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;
  
        do {
-               rc = tipc_bcast_xmit(net, pktchain);
+               rc = tipc_bcast_xmit(net, &pktchain);
                if (likely(!rc))
                        return dsz;
  
                        if (!rc)
                                continue;
                }
-               __skb_queue_purge(pktchain);
+               __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        msg->msg_iter = save;
                        goto new_mtu;
@@@ -856,7 -865,7 +858,7 @@@ static int __tipc_sendmsg(struct socke
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        u32 dnode, dport;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        struct sk_buff *skb;
        struct tipc_name_seq *seq;
        struct iov_iter save;
                msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
        }
  
+       skb_queue_head_init(&pktchain);
        save = m->msg_iter;
  new_mtu:
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
-       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
        if (rc < 0)
                return rc;
  
        do {
-               skb = skb_peek(pktchain);
+               skb = skb_peek(&pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+               rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
                if (likely(!rc)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
                        if (!rc)
                                continue;
                }
-               __skb_queue_purge(pktchain);
+               __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        m->msg_iter = save;
                        goto new_mtu;
@@@ -1009,7 -1019,7 +1012,7 @@@ static int __tipc_send_stream(struct so
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        u32 portid = tsk->portid;
        int rc = -EINVAL;
  
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        dnode = tsk_peer_node(tsk);
+       skb_queue_head_init(&pktchain);
  
  next:
        save = m->msg_iter;
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-       rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_node_xmit(net, pktchain, dnode, portid);
+                       rc = tipc_node_xmit(net, &pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
-                               __skb_queue_purge(pktchain);
+                               __skb_queue_purge(&pktchain);
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
                                m->msg_iter = save;
                rc = tipc_wait_for_sndpkt(sock, &timeo);
        } while (!rc);
  
-       __skb_queue_purge(pktchain);
+       __skb_queue_purge(&pktchain);
        return sent ? sent : rc;
  }
  
diff --combined net/tipc/subscr.c
index 22963cafd5ede27d59ecb772d1cd5c4257cacb98,f9ff73a8d8154ff0a52f8c33a64077f5d8c57ae5..e6cb386fbf3469017f805e5db135d04b86a30d78
@@@ -92,42 -92,25 +92,42 @@@ static void tipc_subscrp_send_event(str
   *
   * Returns 1 if there is overlap, otherwise 0.
   */
 -int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
 +int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
                               u32 found_upper)
  {
 -      if (found_lower < sub->seq.lower)
 -              found_lower = sub->seq.lower;
 -      if (found_upper > sub->seq.upper)
 -              found_upper = sub->seq.upper;
 +      if (found_lower < seq->lower)
 +              found_lower = seq->lower;
 +      if (found_upper > seq->upper)
 +              found_upper = seq->upper;
        if (found_lower > found_upper)
                return 0;
        return 1;
  }
  
 +u32 tipc_subscrp_convert_seq_type(u32 type, int swap)
 +{
 +      return htohl(type, swap);
 +}
 +
 +void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
 +                            struct tipc_name_seq *out)
 +{
 +      out->type = htohl(in->type, swap);
 +      out->lower = htohl(in->lower, swap);
 +      out->upper = htohl(in->upper, swap);
 +}
 +
  void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
                                 u32 found_upper, u32 event, u32 port_ref,
                                 u32 node, int must)
  {
 -      if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
 +      struct tipc_name_seq seq;
 +
 +      tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 +      if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
 -      if (!must && !(sub->filter & TIPC_SUB_PORTS))
 +      if (!must &&
 +          !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
                return;
  
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
@@@ -188,14 -171,12 +188,14 @@@ static struct tipc_subscriber *tipc_sub
  static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
  {
        struct tipc_subscription *sub, *temp;
 +      u32 timeout;
  
        spin_lock_bh(&subscriber->lock);
        /* Destroy any existing subscriptions for subscriber */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
 -              if (del_timer(&sub->timer)) {
 +              timeout = htohl(sub->evt.s.timeout, sub->swap);
 +              if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
                        tipc_subscrp_delete(sub);
                        tipc_subscrb_put(subscriber);
                }
@@@ -219,16 -200,13 +219,16 @@@ static void tipc_subscrp_cancel(struct 
                                struct tipc_subscriber *subscriber)
  {
        struct tipc_subscription *sub, *temp;
 +      u32 timeout;
  
        spin_lock_bh(&subscriber->lock);
        /* Find first matching subscription, exit if not found */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
                if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
 -                      if (del_timer(&sub->timer)) {
 +                      timeout = htohl(sub->evt.s.timeout, sub->swap);
 +                      if ((timeout == TIPC_WAIT_FOREVER) ||
 +                          del_timer(&sub->timer)) {
                                tipc_subscrp_delete(sub);
                                tipc_subscrb_put(subscriber);
                        }
        spin_unlock_bh(&subscriber->lock);
  }
  
 -static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
 -                             struct tipc_subscriber *subscriber,
 -                             struct tipc_subscription **sub_p)
 +static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 +                                                   struct tipc_subscr *s,
 +                                                   int swap)
  {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
 -      int swap;
 -
 -      /* Determine subscriber's endianness */
 -      swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
 -
 -      /* Detect & process a subscription cancellation request */
 -      if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 -              s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
 -              tipc_subscrp_cancel(s, subscriber);
 -              return 0;
 -      }
 +      u32 filter = htohl(s->filter, swap);
  
        /* Refuse subscription if global limit exceeded */
        if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
 -              return -EINVAL;
 +              return NULL;
        }
  
        /* Allocate subscription object */
        sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
        if (!sub) {
                pr_warn("Subscription rejected, no memory\n");
 -              return -ENOMEM;
 +              return NULL;
        }
  
        /* Initialize subscription object */
        sub->net = net;
 -      sub->seq.type = htohl(s->seq.type, swap);
 -      sub->seq.lower = htohl(s->seq.lower, swap);
 -      sub->seq.upper = htohl(s->seq.upper, swap);
 -      sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
 -      sub->filter = htohl(s->filter, swap);
 -      if ((!(sub->filter & TIPC_SUB_PORTS) ==
 -           !(sub->filter & TIPC_SUB_SERVICE)) ||
 -          (sub->seq.lower > sub->seq.upper)) {
 +      if (((filter & TIPC_SUB_PORTS) && (filter & TIPC_SUB_SERVICE)) ||
 +          (htohl(s->seq.lower, swap) > htohl(s->seq.upper, swap))) {
                pr_warn("Subscription rejected, illegal request\n");
                kfree(sub);
 -              return -EINVAL;
 +              return NULL;
        }
 -      spin_lock_bh(&subscriber->lock);
 -      list_add(&sub->subscrp_list, &subscriber->subscrp_list);
 -      spin_unlock_bh(&subscriber->lock);
 -      sub->subscriber = subscriber;
 +
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
 +      return sub;
 +}
 +
 +static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 +                                 struct tipc_subscriber *subscriber, int swap)
 +{
 +      struct tipc_net *tn = net_generic(net, tipc_net_id);
 +      struct tipc_subscription *sub = NULL;
 +      u32 timeout;
 +
 +      sub = tipc_subscrp_create(net, s, swap);
 +      if (!sub)
 +              return tipc_conn_terminate(tn->topsrv, subscriber->conid);
 +
 +      spin_lock_bh(&subscriber->lock);
 +      list_add(&sub->subscrp_list, &subscriber->subscrp_list);
 +      tipc_subscrb_get(subscriber);
 +      sub->subscriber = subscriber;
 +      tipc_nametbl_subscribe(sub);
 +      spin_unlock_bh(&subscriber->lock);
 +
 +      timeout = htohl(sub->evt.s.timeout, swap);
 +      if (timeout == TIPC_WAIT_FOREVER)
 +              return;
 +
        setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
 -      if (sub->timeout != TIPC_WAIT_FOREVER)
 -              sub->timeout += jiffies;
 -      if (!mod_timer(&sub->timer, sub->timeout))
 -              tipc_subscrb_get(subscriber);
 -      *sub_p = sub;
 -      return 0;
 +      mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
  }
  
  /* Handle one termination request for the subscriber */
@@@ -312,21 -289,15 +312,22 @@@ static void tipc_subscrb_rcv_cb(struct 
                                struct sockaddr_tipc *addr, void *usr_data,
                                void *buf, size_t len)
  {
 -      struct tipc_subscriber *subscrb = usr_data;
 -      struct tipc_subscription *sub = NULL;
 -      struct tipc_net *tn = net_generic(net, tipc_net_id);
 +      struct tipc_subscriber *subscriber = usr_data;
 +      struct tipc_subscr *s = (struct tipc_subscr *)buf;
 +      int swap;
  
 -      if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
 -              return tipc_conn_terminate(tn->topsrv, subscrb->conid);
 +      /* Determine subscriber's endianness */
 +      swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
 +                            TIPC_SUB_CANCEL));
 +
 +      /* Detect & process a subscription cancellation request */
 +      if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 +              s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
 +              return tipc_subscrp_cancel(s, subscriber);
 +      }
  
-       tipc_subscrp_subscribe(net, s, subscriber, swap);
 -      if (sub)
 -              tipc_nametbl_subscribe(sub);
++      if (s)
++              tipc_subscrp_subscribe(net, s, subscriber, swap);
  }
  
  /* Handle one request to establish a new subscriber */
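
The tipc/subscr.c rework keeps each subscription request verbatim in sub->evt.s and converts fields on use via htohl(..., sub->swap); endianness detection and cancellation handling move up into the receive callback, and TIPC_WAIT_FOREVER subscriptions no longer arm a timer at all, which is why delete and cancel now accept either that timeout value or a successful del_timer(). The conversion leans on the file's conditional byte-swap helper; its assumed shape (not shown in the hunks) is:

    /* Assumed definition: swap a 32-bit field only when the subscriber's
     * byte order differs from the host's.
     */
    static u32 htohl(u32 in, int swap)
    {
            return swap ? swab32(in) : in;
    }
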
diff --combined net/wireless/core.c
index 3a9c41bc849aa8f32fbc75eafe564b3a5385c4db,8f0bac7e03c406466e0b5ca2b01b3ca2574ab79f..9f1c4aa851efdf55ab81044b10ef58b7bd73ecf6
@@@ -352,16 -352,6 +352,16 @@@ struct wiphy *wiphy_new_nm(const struc
        WARN_ON(ops->add_station && !ops->del_station);
        WARN_ON(ops->add_mpath && !ops->del_mpath);
        WARN_ON(ops->join_mesh && !ops->leave_mesh);
 +      WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device);
 +      WARN_ON(ops->start_ap && !ops->stop_ap);
 +      WARN_ON(ops->join_ocb && !ops->leave_ocb);
 +      WARN_ON(ops->suspend && !ops->resume);
 +      WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop);
 +      WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
 +      WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
 +      WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
 +      WARN_ON(ops->set_tx_power && !ops->get_tx_power);
 +      WARN_ON(ops->set_antenna && !ops->get_antenna);
  
        alloc_size = sizeof(*rdev) + sizeof_priv;
  
@@@ -1157,6 -1147,8 +1157,8 @@@ static int cfg80211_netdev_notifier_cal
                return NOTIFY_DONE;
        }
  
+       wireless_nlevent_flush();
        return NOTIFY_OK;
  }
  
diff --combined net/wireless/nl80211.c
index 90890f183c0e51dfdade4a46c96ffe9080290ffe,711cb7ad6ae011132b868d928ebe23037040db82..98c924260b3d312055c598255cff8478bccba1e9
@@@ -3,7 -3,7 +3,7 @@@
   *
   * Copyright 2006-2010        Johannes Berg <johannes@sipsolutions.net>
   * Copyright 2013-2014  Intel Mobile Communications GmbH
 - * Copyright 2015     Intel Deutschland GmbH
 + * Copyright 2015-2016        Intel Deutschland GmbH
   */
  
  #include <linux/if.h>
@@@ -401,7 -401,6 +401,7 @@@ static const struct nla_policy nl80211_
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
        [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
  };
  
  /* policy for the key attributes */
@@@ -3462,10 -3461,6 +3462,10 @@@ static int nl80211_start_ap(struct sk_b
                        return PTR_ERR(params.acl);
        }
  
 +      params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
 +      if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
 +              return -EOPNOTSUPP;
 +
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
@@@ -7286,11 -7281,9 +7286,11 @@@ static int nl80211_associate(struct sk_
        }
  
        if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
 -              if (!(rdev->wiphy.features &
 -                    NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
 -                  !(rdev->wiphy.features & NL80211_FEATURE_QUIET))
 +              if (!((rdev->wiphy.features &
 +                      NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) &&
 +                     (rdev->wiphy.features & NL80211_FEATURE_QUIET)) &&
 +                  !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                           NL80211_EXT_FEATURE_RRM))
                        return -EINVAL;
                req.flags |= ASSOC_REQ_USE_RRM;
        }
@@@ -7554,7 -7547,7 +7554,7 @@@ static int nl80211_join_ibss(struct sk_
  
                if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) &&
                    no_ht) {
-                       kfree(connkeys);
+                       kzfree(connkeys);
                        return -EINVAL;
                }
        }
@@@ -7978,23 -7971,15 +7978,23 @@@ static int nl80211_connect(struct sk_bu
        }
  
        if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
 -              if (!(rdev->wiphy.features &
 -                    NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
 -                  !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) {
 +              if (!((rdev->wiphy.features &
 +                      NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) &&
 +                     (rdev->wiphy.features & NL80211_FEATURE_QUIET)) &&
 +                  !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                           NL80211_EXT_FEATURE_RRM)) {
                        kzfree(connkeys);
                        return -EINVAL;
                }
                connect.flags |= ASSOC_REQ_USE_RRM;
        }
  
 +      connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
 +      if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
 +              kzfree(connkeys);
 +              return -EOPNOTSUPP;
 +      }
 +
        wdev_lock(dev->ieee80211_ptr);
        err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
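
Both nl80211_associate() and nl80211_connect() relax the RRM gate: instead of always requiring NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES together with NL80211_FEATURE_QUIET, a driver advertising the new NL80211_EXT_FEATURE_RRM extended feature now qualifies on its own. The condition duplicated in the two handlers, factored into a sketch (the helper name is illustrative):

    static bool rrm_allowed(struct wiphy *wiphy)
    {
            u32 legacy = NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES |
                         NL80211_FEATURE_QUIET;

            return (wiphy->features & legacy) == legacy ||
                   wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_RRM);
    }
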
diff --combined net/wireless/sme.c
index 79bd3a171caa83ed8ae811b744fd271475b0257a,d49ed7666d4cb3e5a20641ebc0acfd3ccda80ff9..5445581717874e1409d994f24c9ca7263e62ab38
@@@ -264,7 -264,7 +264,7 @@@ static struct cfg80211_bss *cfg80211_ge
                               wdev->conn->params.bssid,
                               wdev->conn->params.ssid,
                               wdev->conn->params.ssid_len,
 -                             IEEE80211_BSS_TYPE_ESS,
 +                             wdev->conn_bss_type,
                               IEEE80211_PRIVACY(wdev->conn->params.privacy));
        if (!bss)
                return NULL;
@@@ -687,7 -687,7 +687,7 @@@ void __cfg80211_connect_result(struct n
                WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
 -                                     IEEE80211_BSS_TYPE_ESS,
 +                                     wdev->conn_bss_type,
                                       IEEE80211_PRIVACY_ANY);
                if (bss)
                        cfg80211_hold_bss(bss_from_pub(bss));
@@@ -846,7 -846,7 +846,7 @@@ void cfg80211_roamed(struct net_device 
  
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
                               wdev->ssid_len,
 -                             IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
 +                             wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
        if (WARN_ON(!bss))
                return;
  
@@@ -917,6 -917,12 +917,12 @@@ void __cfg80211_disconnected(struct net
  
        nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
  
+       /* stop critical protocol if supported */
+       if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) {
+               rdev->crit_proto_nlportid = 0;
+               rdev_crit_proto_stop(rdev, wdev);
+       }
        /*
         * Delete all the keys ... pairwise keys can't really
         * exist any more anyway, but default keys might.
@@@ -1017,9 -1023,6 +1023,9 @@@ int cfg80211_connect(struct cfg80211_re
        memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
        wdev->ssid_len = connect->ssid_len;
  
 +      wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
 +                                            IEEE80211_BSS_TYPE_ESS;
 +
        if (!rdev->ops->connect)
                err = cfg80211_sme_connect(wdev, connect, prev_bssid);
        else